ngram
listlengths 0
67.8k
|
|---|
[
"def get_2d_array_visualization(items): visualization = '' for rows in items: visualization += ' '.join([str(row)",
"# y-axis while the inner loop should represent the x-axis import sys ARRAY_DIMENSION",
"are not encapsulated. They could shadow # local variables if you name them",
"] arr = [] for arr_i in xrange(ARRAY_DIMENSION): arr_temp = map(int,raw_input().strip().split(' ')) arr.append(arr_temp)",
"#!/bin/python #https://www.hackerrank.com/challenges/2d-array # KEY INSIGHTS # 1. Variables in list comprehensians are not",
"for row in hourglass: for item in row: s += item return s",
"# ] # return [ # [-1, -1, 0, -9, -2, -2, ],",
"represent the # y-axis while the inner loop should represent the x-axis import",
"loop should represent the # y-axis while the inner loop should represent the",
"current_x = a + x current_y = b + y hourglass[a][b] = arr[current_x][current_y]",
"+ y hourglass[a][b] = arr[current_x][current_y] return hourglass arr = get_array_input() greatest_sum = None",
"xrange(ARRAY_DIMENSION - HOURGLASS_HEIGHT + 1): hg = get_hourglass(arr, x, y) shg = get_hourglass_sum(hg)",
"arr = [] for arr_i in xrange(ARRAY_DIMENSION): arr_temp = map(int,raw_input().strip().split(' ')) arr.append(arr_temp) return",
"1, 0, 0, 0], # [0, 1, 0, 0, 0, 0], # [1,",
"if is_part_of_hourglass(a, b): current_x = a + x current_y = b + y",
"s def get_hourglass(arr, x, y): hourglass = [[0 for foo in xrange(HOURGLASS_WIDTH)] for",
"HOURGLASS_WIDTH = 3 HOURGLASS_HEIGHT = 3 def get_2d_array_visualization(items): visualization = '' for rows",
"items: visualization += ' '.join([str(row) for row in rows]) + '\\n' return visualization",
"= a + x current_y = b + y hourglass[a][b] = arr[current_x][current_y] return",
"2. When looping 2-dimensional arrays, the outer loop should represent the # y-axis",
"-2, -4, -4, -5], # [-7, -3, -3, -2, -9, -9], # [-1,",
"[-1, -3, -1, -2, -4, -5], # ] arr = [] for arr_i",
"shadow # local variables if you name them the same # 2. When",
"get_hourglass(arr, x, y) shg = get_hourglass_sum(hg) # print(get_2d_array_visualization(hg)) # print(shg) # print('==========') if",
"rows]) + '\\n' return visualization def get_array_input(): # return [ # [1, 1,",
"# return [ # [-1, -1, 0, -9, -2, -2, ], # [-2,",
"for arr_i in xrange(ARRAY_DIMENSION): arr_temp = map(int,raw_input().strip().split(' ')) arr.append(arr_temp) return arr def is_part_of_hourglass(x,",
"xrange(HOURGLASS_WIDTH): if is_part_of_hourglass(a, b): current_x = a + x current_y = b +",
"if greatest_sum is None: greatest_sum = shg if shg > greatest_sum: greatest_sum =",
"sys ARRAY_DIMENSION = 6 HOURGLASS_WIDTH = 3 HOURGLASS_HEIGHT = 3 def get_2d_array_visualization(items): visualization",
"def get_hourglass_sum(hourglass): s = 0 for row in hourglass: for item in row:",
"xrange(HOURGLASS_HEIGHT)] for b in xrange(HOURGLASS_HEIGHT): for a in xrange(HOURGLASS_WIDTH): if is_part_of_hourglass(a, b): current_x",
"in rows]) + '\\n' return visualization def get_array_input(): # return [ # [1,",
"0, 0], # [0, 9, 2, -4, -4, 0], # [0, 0, 0,",
"(x == 1 and y == 0) or (x == 1 and y",
"- HOURGLASS_HEIGHT + 1): hg = get_hourglass(arr, x, y) shg = get_hourglass_sum(hg) #",
"= [] for arr_i in xrange(ARRAY_DIMENSION): arr_temp = map(int,raw_input().strip().split(' ')) arr.append(arr_temp) return arr",
"get_2d_array_visualization(items): visualization = '' for rows in items: visualization += ' '.join([str(row) for",
"[] for arr_i in xrange(ARRAY_DIMENSION): arr_temp = map(int,raw_input().strip().split(' ')) arr.append(arr_temp) return arr def",
"Variables in list comprehensians are not encapsulated. They could shadow # local variables",
"== 1 and y == 2): return False else: return True def get_hourglass_sum(hourglass):",
"6 HOURGLASS_WIDTH = 3 HOURGLASS_HEIGHT = 3 def get_2d_array_visualization(items): visualization = '' for",
"True def get_hourglass_sum(hourglass): s = 0 for row in hourglass: for item in",
"while the inner loop should represent the x-axis import sys ARRAY_DIMENSION = 6",
"should represent the # y-axis while the inner loop should represent the x-axis",
"outer loop should represent the # y-axis while the inner loop should represent",
"y-axis while the inner loop should represent the x-axis import sys ARRAY_DIMENSION =",
"False else: return True def get_hourglass_sum(hourglass): s = 0 for row in hourglass:",
"= 3 def get_2d_array_visualization(items): visualization = '' for rows in items: visualization +=",
"item return s def get_hourglass(arr, x, y): hourglass = [[0 for foo in",
"if you name them the same # 2. When looping 2-dimensional arrays, the",
"def get_hourglass(arr, x, y): hourglass = [[0 for foo in xrange(HOURGLASS_WIDTH)] for bar",
"-5], # ] arr = [] for arr_i in xrange(ARRAY_DIMENSION): arr_temp = map(int,raw_input().strip().split('",
"2, -4, -4, 0], # [0, 0, 0, -2, 0, 0], # [0,",
"hourglass: for item in row: s += item return s def get_hourglass(arr, x,",
"in hourglass: for item in row: s += item return s def get_hourglass(arr,",
"# print(get_2d_array_visualization(hg)) # print(shg) # print('==========') if greatest_sum is None: greatest_sum = shg",
"hourglass[a][b] = arr[current_x][current_y] return hourglass arr = get_array_input() greatest_sum = None for x",
"0, 0, 0, 0], # [1, 1, 1, 0, 0, 0], # [0,",
"the # y-axis while the inner loop should represent the x-axis import sys",
"= get_array_input() greatest_sum = None for x in xrange(ARRAY_DIMENSION - HOURGLASS_WIDTH + 1):",
"# [0, 0, -1, -2, -4, 0], # ] # return [ #",
"looping 2-dimensional arrays, the outer loop should represent the # y-axis while the",
"[ # [1, 1, 1, 0, 0, 0], # [0, 1, 0, 0,",
"1. Variables in list comprehensians are not encapsulated. They could shadow # local",
"encapsulated. They could shadow # local variables if you name them the same",
"0, 0], # [0, 0, -1, -2, -4, 0], # ] # return",
"'' for rows in items: visualization += ' '.join([str(row) for row in rows])",
"x current_y = b + y hourglass[a][b] = arr[current_x][current_y] return hourglass arr =",
"0 for row in hourglass: for item in row: s += item return",
"] # return [ # [-1, -1, 0, -9, -2, -2, ], #",
"== 0) or (x == 1 and y == 2): return False else:",
"'\\n' return visualization def get_array_input(): # return [ # [1, 1, 1, 0,",
"current_y = b + y hourglass[a][b] = arr[current_x][current_y] return hourglass arr = get_array_input()",
"hourglass = [[0 for foo in xrange(HOURGLASS_WIDTH)] for bar in xrange(HOURGLASS_HEIGHT)] for b",
"# [-1, -1, 0, -9, -2, -2, ], # [-2, -1, -6, -8,",
"-2, 0, 0], # [0, 0, -1, -2, -4, 0], # ] #",
"# local variables if you name them the same # 2. When looping",
"== 2): return False else: return True def get_hourglass_sum(hourglass): s = 0 for",
"[-2, -1, -6, -8, -2, -5], # [-1, -1, -1, -2, -3, -4],",
"0, -2, 0, 0], # [0, 0, -1, -2, -4, 0], # ]",
"-1, -1, -2, -3, -4], # [-1, -9, -2, -4, -4, -5], #",
"y == 0) or (x == 1 and y == 2): return False",
"and y == 2): return False else: return True def get_hourglass_sum(hourglass): s =",
"# [-2, -1, -6, -8, -2, -5], # [-1, -1, -1, -2, -3,",
"print(shg) # print('==========') if greatest_sum is None: greatest_sum = shg if shg >",
"same # 2. When looping 2-dimensional arrays, the outer loop should represent the",
"return hourglass arr = get_array_input() greatest_sum = None for x in xrange(ARRAY_DIMENSION -",
"-3, -2, -9, -9], # [-1, -3, -1, -2, -4, -5], # ]",
"hourglass arr = get_array_input() greatest_sum = None for x in xrange(ARRAY_DIMENSION - HOURGLASS_WIDTH",
"y) shg = get_hourglass_sum(hg) # print(get_2d_array_visualization(hg)) # print(shg) # print('==========') if greatest_sum is",
"item in row: s += item return s def get_hourglass(arr, x, y): hourglass",
"or (x == 1 and y == 2): return False else: return True",
"# 2. When looping 2-dimensional arrays, the outer loop should represent the #",
"for b in xrange(HOURGLASS_HEIGHT): for a in xrange(HOURGLASS_WIDTH): if is_part_of_hourglass(a, b): current_x =",
"in list comprehensians are not encapsulated. They could shadow # local variables if",
"return visualization def get_array_input(): # return [ # [1, 1, 1, 0, 0,",
"ARRAY_DIMENSION = 6 HOURGLASS_WIDTH = 3 HOURGLASS_HEIGHT = 3 def get_2d_array_visualization(items): visualization =",
"# print('==========') if greatest_sum is None: greatest_sum = shg if shg > greatest_sum:",
"return s def get_hourglass(arr, x, y): hourglass = [[0 for foo in xrange(HOURGLASS_WIDTH)]",
"(x == 1 and y == 2): return False else: return True def",
"HOURGLASS_HEIGHT = 3 def get_2d_array_visualization(items): visualization = '' for rows in items: visualization",
"variables if you name them the same # 2. When looping 2-dimensional arrays,",
"0, 0, 0], # [0, 1, 0, 0, 0, 0], # [1, 1,",
"-9, -2, -4, -4, -5], # [-7, -3, -3, -2, -9, -9], #",
"# [0, 0, 0, -2, 0, 0], # [0, 0, -1, -2, -4,",
"in row: s += item return s def get_hourglass(arr, x, y): hourglass =",
"x-axis import sys ARRAY_DIMENSION = 6 HOURGLASS_WIDTH = 3 HOURGLASS_HEIGHT = 3 def",
"# ] arr = [] for arr_i in xrange(ARRAY_DIMENSION): arr_temp = map(int,raw_input().strip().split(' '))",
"rows in items: visualization += ' '.join([str(row) for row in rows]) + '\\n'",
"[0, 1, 0, 0, 0, 0], # [1, 1, 1, 0, 0, 0],",
"-9, -2, -2, ], # [-2, -1, -6, -8, -2, -5], # [-1,",
"[0, 0, -1, -2, -4, 0], # ] # return [ # [-1,",
"the x-axis import sys ARRAY_DIMENSION = 6 HOURGLASS_WIDTH = 3 HOURGLASS_HEIGHT = 3",
"s = 0 for row in hourglass: for item in row: s +=",
"get_array_input() greatest_sum = None for x in xrange(ARRAY_DIMENSION - HOURGLASS_WIDTH + 1): for",
"1): hg = get_hourglass(arr, x, y) shg = get_hourglass_sum(hg) # print(get_2d_array_visualization(hg)) # print(shg)",
"-2, -2, ], # [-2, -1, -6, -8, -2, -5], # [-1, -1,",
"2): return False else: return True def get_hourglass_sum(hourglass): s = 0 for row",
"-4], # [-1, -9, -2, -4, -4, -5], # [-7, -3, -3, -2,",
"= 0 for row in hourglass: for item in row: s += item",
"KEY INSIGHTS # 1. Variables in list comprehensians are not encapsulated. They could",
"-2, -4, 0], # ] # return [ # [-1, -1, 0, -9,",
"arr def is_part_of_hourglass(x, y): if (x == 1 and y == 0) or",
"inner loop should represent the x-axis import sys ARRAY_DIMENSION = 6 HOURGLASS_WIDTH =",
"def is_part_of_hourglass(x, y): if (x == 1 and y == 0) or (x",
"[-1, -1, -1, -2, -3, -4], # [-1, -9, -2, -4, -4, -5],",
"], # [-2, -1, -6, -8, -2, -5], # [-1, -1, -1, -2,",
"represent the x-axis import sys ARRAY_DIMENSION = 6 HOURGLASS_WIDTH = 3 HOURGLASS_HEIGHT =",
"a + x current_y = b + y hourglass[a][b] = arr[current_x][current_y] return hourglass",
"0, 0, 0], # [0, 9, 2, -4, -4, 0], # [0, 0,",
"b in xrange(HOURGLASS_HEIGHT): for a in xrange(HOURGLASS_WIDTH): if is_part_of_hourglass(a, b): current_x = a",
"comprehensians are not encapsulated. They could shadow # local variables if you name",
"0], # ] # return [ # [-1, -1, 0, -9, -2, -2,",
"# return [ # [1, 1, 1, 0, 0, 0], # [0, 1,",
"arr_i in xrange(ARRAY_DIMENSION): arr_temp = map(int,raw_input().strip().split(' ')) arr.append(arr_temp) return arr def is_part_of_hourglass(x, y):",
"shg = get_hourglass_sum(hg) # print(get_2d_array_visualization(hg)) # print(shg) # print('==========') if greatest_sum is None:",
"return [ # [-1, -1, 0, -9, -2, -2, ], # [-2, -1,",
"a in xrange(HOURGLASS_WIDTH): if is_part_of_hourglass(a, b): current_x = a + x current_y =",
"# [1, 1, 1, 0, 0, 0], # [0, 9, 2, -4, -4,",
"# print(shg) # print('==========') if greatest_sum is None: greatest_sum = shg if shg",
"y == 2): return False else: return True def get_hourglass_sum(hourglass): s = 0",
"0], # [0, 0, 0, -2, 0, 0], # [0, 0, -1, -2,",
"for a in xrange(HOURGLASS_WIDTH): if is_part_of_hourglass(a, b): current_x = a + x current_y",
"' '.join([str(row) for row in rows]) + '\\n' return visualization def get_array_input(): #",
"arr_temp = map(int,raw_input().strip().split(' ')) arr.append(arr_temp) return arr def is_part_of_hourglass(x, y): if (x ==",
"= get_hourglass_sum(hg) # print(get_2d_array_visualization(hg)) # print(shg) # print('==========') if greatest_sum is None: greatest_sum",
"greatest_sum = None for x in xrange(ARRAY_DIMENSION - HOURGLASS_WIDTH + 1): for y",
"+ '\\n' return visualization def get_array_input(): # return [ # [1, 1, 1,",
"0) or (x == 1 and y == 2): return False else: return",
"not encapsulated. They could shadow # local variables if you name them the",
"visualization def get_array_input(): # return [ # [1, 1, 1, 0, 0, 0],",
"arrays, the outer loop should represent the # y-axis while the inner loop",
"+= item return s def get_hourglass(arr, x, y): hourglass = [[0 for foo",
"0], # [0, 0, -1, -2, -4, 0], # ] # return [",
"xrange(HOURGLASS_WIDTH)] for bar in xrange(HOURGLASS_HEIGHT)] for b in xrange(HOURGLASS_HEIGHT): for a in xrange(HOURGLASS_WIDTH):",
"in items: visualization += ' '.join([str(row) for row in rows]) + '\\n' return",
"row in rows]) + '\\n' return visualization def get_array_input(): # return [ #",
"-2, ], # [-2, -1, -6, -8, -2, -5], # [-1, -1, -1,",
"is_part_of_hourglass(x, y): if (x == 1 and y == 0) or (x ==",
"INSIGHTS # 1. Variables in list comprehensians are not encapsulated. They could shadow",
"for foo in xrange(HOURGLASS_WIDTH)] for bar in xrange(HOURGLASS_HEIGHT)] for b in xrange(HOURGLASS_HEIGHT): for",
"y hourglass[a][b] = arr[current_x][current_y] return hourglass arr = get_array_input() greatest_sum = None for",
"When looping 2-dimensional arrays, the outer loop should represent the # y-axis while",
"HOURGLASS_HEIGHT + 1): hg = get_hourglass(arr, x, y) shg = get_hourglass_sum(hg) # print(get_2d_array_visualization(hg))",
"x, y): hourglass = [[0 for foo in xrange(HOURGLASS_WIDTH)] for bar in xrange(HOURGLASS_HEIGHT)]",
"-2, -3, -4], # [-1, -9, -2, -4, -4, -5], # [-7, -3,",
"x in xrange(ARRAY_DIMENSION - HOURGLASS_WIDTH + 1): for y in xrange(ARRAY_DIMENSION - HOURGLASS_HEIGHT",
"0], # [0, 1, 0, 0, 0, 0], # [1, 1, 1, 0,",
"them the same # 2. When looping 2-dimensional arrays, the outer loop should",
"arr[current_x][current_y] return hourglass arr = get_array_input() greatest_sum = None for x in xrange(ARRAY_DIMENSION",
"in xrange(HOURGLASS_HEIGHT): for a in xrange(HOURGLASS_WIDTH): if is_part_of_hourglass(a, b): current_x = a +",
"-1, -2, -3, -4], # [-1, -9, -2, -4, -4, -5], # [-7,",
"y): hourglass = [[0 for foo in xrange(HOURGLASS_WIDTH)] for bar in xrange(HOURGLASS_HEIGHT)] for",
"bar in xrange(HOURGLASS_HEIGHT)] for b in xrange(HOURGLASS_HEIGHT): for a in xrange(HOURGLASS_WIDTH): if is_part_of_hourglass(a,",
"[-1, -9, -2, -4, -4, -5], # [-7, -3, -3, -2, -9, -9],",
"for row in rows]) + '\\n' return visualization def get_array_input(): # return [",
"get_hourglass(arr, x, y): hourglass = [[0 for foo in xrange(HOURGLASS_WIDTH)] for bar in",
"1, 1, 0, 0, 0], # [0, 1, 0, 0, 0, 0], #",
"in xrange(HOURGLASS_WIDTH): if is_part_of_hourglass(a, b): current_x = a + x current_y = b",
"0, 0], # [0, 1, 0, 0, 0, 0], # [1, 1, 1,",
"1 and y == 0) or (x == 1 and y == 2):",
"+ x current_y = b + y hourglass[a][b] = arr[current_x][current_y] return hourglass arr",
"should represent the x-axis import sys ARRAY_DIMENSION = 6 HOURGLASS_WIDTH = 3 HOURGLASS_HEIGHT",
"# [0, 1, 0, 0, 0, 0], # [1, 1, 1, 0, 0,",
"-1, -2, -4, 0], # ] # return [ # [-1, -1, 0,",
"+ 1): hg = get_hourglass(arr, x, y) shg = get_hourglass_sum(hg) # print(get_2d_array_visualization(hg)) #",
"get_hourglass_sum(hourglass): s = 0 for row in hourglass: for item in row: s",
"-3, -1, -2, -4, -5], # ] arr = [] for arr_i in",
"1, 0, 0, 0], # [0, 9, 2, -4, -4, 0], # [0,",
"get_hourglass_sum(hg) # print(get_2d_array_visualization(hg)) # print(shg) # print('==========') if greatest_sum is None: greatest_sum =",
"[-1, -1, 0, -9, -2, -2, ], # [-2, -1, -6, -8, -2,",
"# [-7, -3, -3, -2, -9, -9], # [-1, -3, -1, -2, -4,",
"-4, -5], # [-7, -3, -3, -2, -9, -9], # [-1, -3, -1,",
"+ 1): for y in xrange(ARRAY_DIMENSION - HOURGLASS_HEIGHT + 1): hg = get_hourglass(arr,",
"# [0, 9, 2, -4, -4, 0], # [0, 0, 0, -2, 0,",
"'.join([str(row) for row in rows]) + '\\n' return visualization def get_array_input(): # return",
"is_part_of_hourglass(a, b): current_x = a + x current_y = b + y hourglass[a][b]",
"greatest_sum is None: greatest_sum = shg if shg > greatest_sum: greatest_sum = shg",
"return [ # [1, 1, 1, 0, 0, 0], # [0, 1, 0,",
"[ # [-1, -1, 0, -9, -2, -2, ], # [-2, -1, -6,",
"y): if (x == 1 and y == 0) or (x == 1",
"if (x == 1 and y == 0) or (x == 1 and",
"in xrange(ARRAY_DIMENSION - HOURGLASS_WIDTH + 1): for y in xrange(ARRAY_DIMENSION - HOURGLASS_HEIGHT +",
"-2, -9, -9], # [-1, -3, -1, -2, -4, -5], # ] arr",
"They could shadow # local variables if you name them the same #",
"[1, 1, 1, 0, 0, 0], # [0, 9, 2, -4, -4, 0],",
"2-dimensional arrays, the outer loop should represent the # y-axis while the inner",
"-2, -4, -5], # ] arr = [] for arr_i in xrange(ARRAY_DIMENSION): arr_temp",
"else: return True def get_hourglass_sum(hourglass): s = 0 for row in hourglass: for",
"[1, 1, 1, 0, 0, 0], # [0, 1, 0, 0, 0, 0],",
"in xrange(ARRAY_DIMENSION): arr_temp = map(int,raw_input().strip().split(' ')) arr.append(arr_temp) return arr def is_part_of_hourglass(x, y): if",
"')) arr.append(arr_temp) return arr def is_part_of_hourglass(x, y): if (x == 1 and y",
"-3, -3, -2, -9, -9], # [-1, -3, -1, -2, -4, -5], #",
"0, 0, -2, 0, 0], # [0, 0, -1, -2, -4, 0], #",
"-5], # [-1, -1, -1, -2, -3, -4], # [-1, -9, -2, -4,",
"foo in xrange(HOURGLASS_WIDTH)] for bar in xrange(HOURGLASS_HEIGHT)] for b in xrange(HOURGLASS_HEIGHT): for a",
"visualization += ' '.join([str(row) for row in rows]) + '\\n' return visualization def",
"return arr def is_part_of_hourglass(x, y): if (x == 1 and y == 0)",
"for y in xrange(ARRAY_DIMENSION - HOURGLASS_HEIGHT + 1): hg = get_hourglass(arr, x, y)",
"1 and y == 2): return False else: return True def get_hourglass_sum(hourglass): s",
"the outer loop should represent the # y-axis while the inner loop should",
"= None for x in xrange(ARRAY_DIMENSION - HOURGLASS_WIDTH + 1): for y in",
"= map(int,raw_input().strip().split(' ')) arr.append(arr_temp) return arr def is_part_of_hourglass(x, y): if (x == 1",
"= get_hourglass(arr, x, y) shg = get_hourglass_sum(hg) # print(get_2d_array_visualization(hg)) # print(shg) # print('==========')",
"-1, -2, -4, -5], # ] arr = [] for arr_i in xrange(ARRAY_DIMENSION):",
"def get_array_input(): # return [ # [1, 1, 1, 0, 0, 0], #",
"# [-1, -1, -1, -2, -3, -4], # [-1, -9, -2, -4, -4,",
"[0, 0, 0, -2, 0, 0], # [0, 0, -1, -2, -4, 0],",
"-4, 0], # [0, 0, 0, -2, 0, 0], # [0, 0, -1,",
"-1, 0, -9, -2, -2, ], # [-2, -1, -6, -8, -2, -5],",
"#https://www.hackerrank.com/challenges/2d-array # KEY INSIGHTS # 1. Variables in list comprehensians are not encapsulated.",
"x, y) shg = get_hourglass_sum(hg) # print(get_2d_array_visualization(hg)) # print(shg) # print('==========') if greatest_sum",
"= [[0 for foo in xrange(HOURGLASS_WIDTH)] for bar in xrange(HOURGLASS_HEIGHT)] for b in",
"print('==========') if greatest_sum is None: greatest_sum = shg if shg > greatest_sum: greatest_sum",
"xrange(HOURGLASS_HEIGHT): for a in xrange(HOURGLASS_WIDTH): if is_part_of_hourglass(a, b): current_x = a + x",
"0], # [1, 1, 1, 0, 0, 0], # [0, 9, 2, -4,",
"1): for y in xrange(ARRAY_DIMENSION - HOURGLASS_HEIGHT + 1): hg = get_hourglass(arr, x,",
"for x in xrange(ARRAY_DIMENSION - HOURGLASS_WIDTH + 1): for y in xrange(ARRAY_DIMENSION -",
"# KEY INSIGHTS # 1. Variables in list comprehensians are not encapsulated. They",
"the inner loop should represent the x-axis import sys ARRAY_DIMENSION = 6 HOURGLASS_WIDTH",
"xrange(ARRAY_DIMENSION - HOURGLASS_WIDTH + 1): for y in xrange(ARRAY_DIMENSION - HOURGLASS_HEIGHT + 1):",
"= arr[current_x][current_y] return hourglass arr = get_array_input() greatest_sum = None for x in",
"for item in row: s += item return s def get_hourglass(arr, x, y):",
"0, -9, -2, -2, ], # [-2, -1, -6, -8, -2, -5], #",
"-4, 0], # ] # return [ # [-1, -1, 0, -9, -2,",
"arr.append(arr_temp) return arr def is_part_of_hourglass(x, y): if (x == 1 and y ==",
"row: s += item return s def get_hourglass(arr, x, y): hourglass = [[0",
"y in xrange(ARRAY_DIMENSION - HOURGLASS_HEIGHT + 1): hg = get_hourglass(arr, x, y) shg",
"-9, -9], # [-1, -3, -1, -2, -4, -5], # ] arr =",
"1, 0, 0, 0, 0], # [1, 1, 1, 0, 0, 0], #",
"local variables if you name them the same # 2. When looping 2-dimensional",
"map(int,raw_input().strip().split(' ')) arr.append(arr_temp) return arr def is_part_of_hourglass(x, y): if (x == 1 and",
"# [-1, -3, -1, -2, -4, -5], # ] arr = [] for",
"# 1. Variables in list comprehensians are not encapsulated. They could shadow #",
"b): current_x = a + x current_y = b + y hourglass[a][b] =",
"-6, -8, -2, -5], # [-1, -1, -1, -2, -3, -4], # [-1,",
"arr = get_array_input() greatest_sum = None for x in xrange(ARRAY_DIMENSION - HOURGLASS_WIDTH +",
"for rows in items: visualization += ' '.join([str(row) for row in rows]) +",
"[-7, -3, -3, -2, -9, -9], # [-1, -3, -1, -2, -4, -5],",
"in xrange(ARRAY_DIMENSION - HOURGLASS_HEIGHT + 1): hg = get_hourglass(arr, x, y) shg =",
"0, 0, 0], # [1, 1, 1, 0, 0, 0], # [0, 9,",
"- HOURGLASS_WIDTH + 1): for y in xrange(ARRAY_DIMENSION - HOURGLASS_HEIGHT + 1): hg",
"return False else: return True def get_hourglass_sum(hourglass): s = 0 for row in",
"you name them the same # 2. When looping 2-dimensional arrays, the outer",
"in xrange(HOURGLASS_WIDTH)] for bar in xrange(HOURGLASS_HEIGHT)] for b in xrange(HOURGLASS_HEIGHT): for a in",
"# [1, 1, 1, 0, 0, 0], # [0, 1, 0, 0, 0,",
"0], # [0, 9, 2, -4, -4, 0], # [0, 0, 0, -2,",
"= b + y hourglass[a][b] = arr[current_x][current_y] return hourglass arr = get_array_input() greatest_sum",
"s += item return s def get_hourglass(arr, x, y): hourglass = [[0 for",
"-8, -2, -5], # [-1, -1, -1, -2, -3, -4], # [-1, -9,",
"list comprehensians are not encapsulated. They could shadow # local variables if you",
"1, 1, 0, 0, 0], # [0, 9, 2, -4, -4, 0], #",
"[0, 9, 2, -4, -4, 0], # [0, 0, 0, -2, 0, 0],",
"# [-1, -9, -2, -4, -4, -5], # [-7, -3, -3, -2, -9,",
"[[0 for foo in xrange(HOURGLASS_WIDTH)] for bar in xrange(HOURGLASS_HEIGHT)] for b in xrange(HOURGLASS_HEIGHT):",
"9, 2, -4, -4, 0], # [0, 0, 0, -2, 0, 0], #",
"import sys ARRAY_DIMENSION = 6 HOURGLASS_WIDTH = 3 HOURGLASS_HEIGHT = 3 def get_2d_array_visualization(items):",
"loop should represent the x-axis import sys ARRAY_DIMENSION = 6 HOURGLASS_WIDTH = 3",
"0, 0], # [1, 1, 1, 0, 0, 0], # [0, 9, 2,",
"return True def get_hourglass_sum(hourglass): s = 0 for row in hourglass: for item",
"3 HOURGLASS_HEIGHT = 3 def get_2d_array_visualization(items): visualization = '' for rows in items:",
"the same # 2. When looping 2-dimensional arrays, the outer loop should represent",
"-3, -4], # [-1, -9, -2, -4, -4, -5], # [-7, -3, -3,",
"in xrange(HOURGLASS_HEIGHT)] for b in xrange(HOURGLASS_HEIGHT): for a in xrange(HOURGLASS_WIDTH): if is_part_of_hourglass(a, b):",
"= '' for rows in items: visualization += ' '.join([str(row) for row in",
"print(get_2d_array_visualization(hg)) # print(shg) # print('==========') if greatest_sum is None: greatest_sum = shg if",
"-4, -4, 0], # [0, 0, 0, -2, 0, 0], # [0, 0,",
"could shadow # local variables if you name them the same # 2.",
"3 def get_2d_array_visualization(items): visualization = '' for rows in items: visualization += '",
"-4, -4, -5], # [-7, -3, -3, -2, -9, -9], # [-1, -3,",
"xrange(ARRAY_DIMENSION): arr_temp = map(int,raw_input().strip().split(' ')) arr.append(arr_temp) return arr def is_part_of_hourglass(x, y): if (x",
"get_array_input(): # return [ # [1, 1, 1, 0, 0, 0], # [0,",
"b + y hourglass[a][b] = arr[current_x][current_y] return hourglass arr = get_array_input() greatest_sum =",
"row in hourglass: for item in row: s += item return s def",
"== 1 and y == 0) or (x == 1 and y ==",
"None for x in xrange(ARRAY_DIMENSION - HOURGLASS_WIDTH + 1): for y in xrange(ARRAY_DIMENSION",
"-4, -5], # ] arr = [] for arr_i in xrange(ARRAY_DIMENSION): arr_temp =",
"-5], # [-7, -3, -3, -2, -9, -9], # [-1, -3, -1, -2,",
"HOURGLASS_WIDTH + 1): for y in xrange(ARRAY_DIMENSION - HOURGLASS_HEIGHT + 1): hg =",
"-2, -5], # [-1, -1, -1, -2, -3, -4], # [-1, -9, -2,",
"-1, -6, -8, -2, -5], # [-1, -1, -1, -2, -3, -4], #",
"for bar in xrange(HOURGLASS_HEIGHT)] for b in xrange(HOURGLASS_HEIGHT): for a in xrange(HOURGLASS_WIDTH): if",
"= 3 HOURGLASS_HEIGHT = 3 def get_2d_array_visualization(items): visualization = '' for rows in",
"0, -1, -2, -4, 0], # ] # return [ # [-1, -1,",
"+= ' '.join([str(row) for row in rows]) + '\\n' return visualization def get_array_input():",
"is None: greatest_sum = shg if shg > greatest_sum: greatest_sum = shg print(greatest_sum)",
"= 6 HOURGLASS_WIDTH = 3 HOURGLASS_HEIGHT = 3 def get_2d_array_visualization(items): visualization = ''",
"and y == 0) or (x == 1 and y == 2): return",
"visualization = '' for rows in items: visualization += ' '.join([str(row) for row",
"hg = get_hourglass(arr, x, y) shg = get_hourglass_sum(hg) # print(get_2d_array_visualization(hg)) # print(shg) #",
"name them the same # 2. When looping 2-dimensional arrays, the outer loop",
"-9], # [-1, -3, -1, -2, -4, -5], # ] arr = []"
] |
[
"Maximum time (in seconds) of total PSOSC execution (not used by default) -I",
"Helper from Particle import Particle from Tree import Tree from Data import Data",
"of Dollo(k) model used as phylogeny tree -a alpha False negative rate in",
"deletions allowed [default: +inf] -e mutfile Path of the mutation names. If not",
"assign_to_cores(os.getpid(), selected_cores) if n_particles == None: n_particles = helper.n_particles if not helper.quiet: print(\"\\n",
"p in particles: p.thread.join() def get_least_used_cores(n_cores): cpu_usage = psutil.cpu_percent(percpu=True) cores = [] for",
"for i in range(n_particles): assigned_numbers[i%(helper.cores)].append(i) # coping data into shared memory ns.best_swarm =",
"multiprocessing.Manager() assign_to_cores(manager._process.ident, selected_cores) lock = manager.Lock() ns = manager.Namespace() # selecting particles to",
"the file containing different FN rates for each mutations -b beta False positive",
"creating and starting processes processes = [] for i in range(helper.cores): processes.append(multiprocessing.Process(target =",
"import datetime import multiprocessing import threading import psutil def main(argv): arguments = docopt(__doc__,",
"alpha False negative rate in input file or path of the file containing",
"each mutations -b beta False positive rate -p particles Number of particles (single",
"data.swarm_best_likelihoods = ns.swarm_best_likelihoods data.particle_best_likelihoods = ns.particle_best_likelihoods data.iterations_performed = ns.iterations_performed data.best = ns.best_swarm.copy() data.pso_end",
"particles (single or multiple values, separated by commas, for a multiple run); by",
"Best likelihood so far\") # creating and starting processes processes = [] for",
"data = Data(helper.filename, n_particles, helper.output) data.pso_start = time.time() # creating shared memory between",
"from Data import Data import os import sys import time from docopt import",
"to cores selected_cores = get_least_used_cores(helper.cores) assign_to_cores(os.getpid(), selected_cores) if n_particles == None: n_particles =",
"mutations will be named progressively from 1 to mutations (not used by default)",
"[-t iterations] [-d max_deletions] [-e mutfile] [-T tolerance] [-m maxtime] [-I truematrix] [--quiet]",
"data def start_threads(selected_cores, assigned_numbers, data, helper, ns, lock): assign_to_cores(os.getpid(), selected_cores) particles = []",
"cores def assign_to_cores(pid, cores): proc = psutil.Process(pid) proc.cpu_affinity(cores) if __name__ == \"__main__\": main(sys.argv[1:])",
"Cell inference Usage: psosc.py (-i infile) (-c cores) (-k k) (-a alpha) (-b",
"helper, ns, lock))) for p in processes: p.start() for p in processes: p.join()",
"= [] for i in range(n_cores): c = cpu_usage.index(min(cpu_usage)) cores.append(c) cpu_usage[c] = float(\"+inf\")",
"processes.append(multiprocessing.Process(target = start_threads, args = (selected_cores, assigned_numbers[i], data, helper, ns, lock))) for p",
"ns, lock)) particles.append(p) for p in particles: p.thread.start() for p in particles: p.thread.join()",
"by default) -d max_deletions Maximum number of total deletions allowed [default: +inf] -e",
"if iterations are not used [default: 0.005] -m maxtime Maximum time (in seconds)",
"not used, mutations will be named progressively from 1 to mutations (not used",
"Matrix input file -c cores Number of CPU cores -k k K value",
"lock)) particles.append(p) for p in particles: p.thread.start() for p in particles: p.thread.join() def",
"• %d PARTICLES START-UP\" % (n_particles)) Tree.set_probabilities(helper.alpha, helper.beta) data = Data(helper.filename, n_particles, helper.output)",
"--quiet Doesn't print anything (not used by default) --output output Limit the output",
"p in processes: p.start() for p in processes: p.join() # copying back data",
"GAMMA rates for each mutations [default: 1] -t iterations Number of iterations (-m",
"arguments = docopt(__doc__, version = \"PSOSC-Cancer-Evolution 2.0\") base_dir = \"results\" + datetime.now().strftime(\"%Y%m%d%H%M%S\") helper",
"of CPU cores -k k K value of Dollo(k) model used as phylogeny",
"Tree import Tree from Data import Data import os import sys import time",
"assigned_numbers: p = Particle(helper.cells, helper.mutation_number, helper.mutation_names, i) p.current_tree.likelihood = Tree.greedy_loglikelihood(p.current_tree, helper.matrix, helper.cells, helper.mutation_number)",
"range(helper.cores)] for i in range(n_particles): assigned_numbers[i%(helper.cores)].append(i) # coping data into shared memory ns.best_swarm",
"in range(helper.n_particles)] ns.iterations_performed = data.iterations_performed ns.stop = False ns.operations = [2,3] ns.attach =",
"in range(n_cores): c = cpu_usage.index(min(cpu_usage)) cores.append(c) cpu_usage[c] = float(\"+inf\") return cores def assign_to_cores(pid,",
"processes: p.start() for p in processes: p.join() # copying back data from shared",
"in particles: p.thread.join() def get_least_used_cores(n_cores): cpu_usage = psutil.cpu_percent(percpu=True) cores = [] for i",
"model used as phylogeny tree -a alpha False negative rate in input file",
"names. If not used, mutations will be named progressively from 1 to mutations",
"Helper import Helper from Particle import Particle from Tree import Tree from Data",
"(not used by default) --quiet Doesn't print anything (not used by default) --output",
"by default it is calculated proportionally to the size of the matrix -g",
"= p.run_iterations, args = (helper, ns, lock)) particles.append(p) for p in particles: p.thread.start()",
"by default) -I truematrix Actual correct matrix, for algorithm testing (not used by",
"mutation names. If not used, mutations will be named progressively from 1 to",
"os.path.exists(base_dir): os.makedirs(base_dir) data = pso(helper, n_particles) data.summary(helper, run_dir) runs_data.append(data) Data.runs_summary(helper.n_particles, runs_data, base_dir) else:",
"ns.best_swarm is None: ns.best_swarm = p.current_tree.copy() p.thread = threading.Thread(target = p.run_iterations, args =",
"Optimization Single Cell inference Usage: psosc.py (-i infile) (-c cores) (-k k) (-a",
"= float(\"+inf\") return cores def assign_to_cores(pid, cores): proc = psutil.Process(pid) proc.cpu_affinity(cores) if __name__",
"print(\"\\t Time\\t\\t Best likelihood so far\") # creating and starting processes processes =",
"[] for r, n_particles in enumerate(helper.n_particles): print (\"\\n\\n======= Run number %d =======\" %",
"rate -p particles Number of particles (single or multiple values, separated by commas,",
"for each mutations [default: 1] -t iterations Number of iterations (-m argument will",
"-i infile Matrix input file -c cores Number of CPU cores -k k",
"helper, ns, lock): assign_to_cores(os.getpid(), selected_cores) particles = [] for i in assigned_numbers: p",
"file containing different FN rates for each mutations -b beta False positive rate",
"Particle import Particle from Tree import Tree from Data import Data import os",
"by commas, for a multiple run); by default it is calculated proportionally to",
"i) p.current_tree.likelihood = Tree.greedy_loglikelihood(p.current_tree, helper.matrix, helper.cells, helper.mutation_number) if ns.best_swarm is None: ns.best_swarm =",
"the output (files created) to: (image | plots | text_file | all) [default:",
"data = pso(helper) data.summary(helper, base_dir) def pso(helper, n_particles=None): # assigning process to cores",
"data.pso_start = time.time() # creating shared memory between processes manager = multiprocessing.Manager() assign_to_cores(manager._process.ident,",
"= manager.Namespace() # selecting particles to assign to processes assigned_numbers = [[] for",
"copying back data from shared memory data.swarm_best_likelihoods = ns.swarm_best_likelihoods data.particle_best_likelihoods = ns.particle_best_likelihoods data.iterations_performed",
"by default) --output output Limit the output (files created) to: (image | plots",
"not helper.quiet: print(\"\\n • %d PARTICLES START-UP\" % (n_particles)) Tree.set_probabilities(helper.alpha, helper.beta) data =",
"not used [default: 0.005] -m maxtime Maximum time (in seconds) of total PSOSC",
"= ns.particle_best_likelihoods data.iterations_performed = ns.iterations_performed data.best = ns.best_swarm.copy() data.pso_end = time.time() if not",
"output (files created) to: (image | plots | text_file | all) [default: all]",
"-m maxtime Maximum time (in seconds) of total PSOSC execution (not used by",
"in processes: p.join() # copying back data from shared memory data.swarm_best_likelihoods = ns.swarm_best_likelihoods",
"data into shared memory ns.best_swarm = None ns.swarm_best_likelihoods = [] ns.particle_best_likelihoods = [[]",
"version = \"PSOSC-Cancer-Evolution 2.0\") base_dir = \"results\" + datetime.now().strftime(\"%Y%m%d%H%M%S\") helper = Helper(arguments) if",
"for i in range(helper.cores)] for i in range(n_particles): assigned_numbers[i%(helper.cores)].append(i) # coping data into",
"used as phylogeny tree -a alpha False negative rate in input file or",
"(\"\\n\\n======= Run number %d =======\" % (r+1)) run_dir = base_dir + \"/particles%d_run%d\" %",
"ns.stop = False ns.operations = [2,3] ns.attach = True if not helper.quiet: print(\"\\n",
"= [] for i in range(helper.cores): processes.append(multiprocessing.Process(target = start_threads, args = (selected_cores, assigned_numbers[i],",
"lock))) for p in processes: p.start() for p in processes: p.join() # copying",
"data.best = ns.best_swarm.copy() data.pso_end = time.time() if not helper.quiet: print(\"\\n • FINAL RESULTS\")",
"anything (not used by default) --output output Limit the output (files created) to:",
"0 and 1) in the last iterations in order to keep going, if",
"processes processes = [] for i in range(helper.cores): processes.append(multiprocessing.Process(target = start_threads, args =",
"seconds\" % (data.n_particles, str(round(data.get_total_time(), 2)))) print(\"\\t- best likelihood: %s\\n\" % str(round(data.best.likelihood, 2))) return",
"helper.mutation_names, i) p.current_tree.likelihood = Tree.greedy_loglikelihood(p.current_tree, helper.matrix, helper.cells, helper.mutation_number) if ns.best_swarm is None: ns.best_swarm",
"(selected_cores, assigned_numbers[i], data, helper, ns, lock))) for p in processes: p.start() for p",
"str(round(data.get_total_time(), 2)))) print(\"\\t- best likelihood: %s\\n\" % str(round(data.best.likelihood, 2))) return data def start_threads(selected_cores,",
"[-I truematrix] [--quiet] [--output output] psosc.py --help psosc.py --version Options: -i infile Matrix",
"from Tree import Tree from Data import Data import os import sys import",
"i in range(helper.cores)] for i in range(n_particles): assigned_numbers[i%(helper.cores)].append(i) # coping data into shared",
"= docopt(__doc__, version = \"PSOSC-Cancer-Evolution 2.0\") base_dir = \"results\" + datetime.now().strftime(\"%Y%m%d%H%M%S\") helper =",
"the file containing different GAMMA rates for each mutations [default: 1] -t iterations",
"def start_threads(selected_cores, assigned_numbers, data, helper, ns, lock): assign_to_cores(os.getpid(), selected_cores) particles = [] for",
"i in assigned_numbers: p = Particle(helper.cells, helper.mutation_number, helper.mutation_names, i) p.current_tree.likelihood = Tree.greedy_loglikelihood(p.current_tree, helper.matrix,",
"= Helper(arguments) if helper.multiple_runs: runs_data = [] for r, n_particles in enumerate(helper.n_particles): print",
"psosc.py --help psosc.py --version Options: -i infile Matrix input file -c cores Number",
"threading.Thread(target = p.run_iterations, args = (helper, ns, lock)) particles.append(p) for p in particles:",
"from shared memory data.swarm_best_likelihoods = ns.swarm_best_likelihoods data.particle_best_likelihoods = ns.particle_best_likelihoods data.iterations_performed = ns.iterations_performed data.best",
"into shared memory ns.best_swarm = None ns.swarm_best_likelihoods = [] ns.particle_best_likelihoods = [[] for",
"None: n_particles = helper.n_particles if not helper.quiet: print(\"\\n • %d PARTICLES START-UP\" %",
"file or path of the file containing different GAMMA rates for each mutations",
"psosc.py --version Options: -i infile Matrix input file -c cores Number of CPU",
"containing different GAMMA rates for each mutations [default: 1] -t iterations Number of",
"import Particle from Tree import Tree from Data import Data import os import",
"not helper.quiet: print(\"\\n • PSO RUNNING...\") print(\"\\t Time\\t\\t Best likelihood so far\") #",
"to assign to processes assigned_numbers = [[] for i in range(helper.cores)] for i",
"FINAL RESULTS\") print(\"\\t- time to complete pso with %d particles: %s seconds\" %",
"get_least_used_cores(n_cores): cpu_usage = psutil.cpu_percent(percpu=True) cores = [] for i in range(n_cores): c =",
"be named progressively from 1 to mutations (not used by default) -T tolerance",
"of total deletions allowed [default: +inf] -e mutfile Path of the mutation names.",
"rates for each mutations -b beta False positive rate -p particles Number of",
"[-d max_deletions] [-e mutfile] [-T tolerance] [-m maxtime] [-I truematrix] [--quiet] [--output output]",
"beta) [-p particles] [-g gamma] [-t iterations] [-d max_deletions] [-e mutfile] [-T tolerance]",
"to mutations (not used by default) -T tolerance Tolerance, minimum relative improvement (between",
"-t iterations Number of iterations (-m argument will be ignored; not used by",
"str(round(data.best.likelihood, 2))) return data def start_threads(selected_cores, assigned_numbers, data, helper, ns, lock): assign_to_cores(os.getpid(), selected_cores)",
"+ \"/particles%d_run%d\" % (n_particles, (r+1)) if not os.path.exists(base_dir): os.makedirs(base_dir) data = pso(helper, n_particles)",
"matrix, for algorithm testing (not used by default) --quiet Doesn't print anything (not",
"lock): assign_to_cores(os.getpid(), selected_cores) particles = [] for i in assigned_numbers: p = Particle(helper.cells,",
"cores) (-k k) (-a alpha) (-b beta) [-p particles] [-g gamma] [-t iterations]",
"pso(helper, n_particles) data.summary(helper, run_dir) runs_data.append(data) Data.runs_summary(helper.n_particles, runs_data, base_dir) else: data = pso(helper) data.summary(helper,",
"PARTICLES START-UP\" % (n_particles)) Tree.set_probabilities(helper.alpha, helper.beta) data = Data(helper.filename, n_particles, helper.output) data.pso_start =",
"memory data.swarm_best_likelihoods = ns.swarm_best_likelihoods data.particle_best_likelihoods = ns.particle_best_likelihoods data.iterations_performed = ns.iterations_performed data.best = ns.best_swarm.copy()",
"helper.quiet: print(\"\\n • PSO RUNNING...\") print(\"\\t Time\\t\\t Best likelihood so far\") # creating",
"[-p particles] [-g gamma] [-t iterations] [-d max_deletions] [-e mutfile] [-T tolerance] [-m",
"are not used [default: 0.005] -m maxtime Maximum time (in seconds) of total",
"default) -d max_deletions Maximum number of total deletions allowed [default: +inf] -e mutfile",
"helper.quiet: print(\"\\n • %d PARTICLES START-UP\" % (n_particles)) Tree.set_probabilities(helper.alpha, helper.beta) data = Data(helper.filename,",
"= start_threads, args = (selected_cores, assigned_numbers[i], data, helper, ns, lock))) for p in",
"n_particles, helper.output) data.pso_start = time.time() # creating shared memory between processes manager =",
"# copying back data from shared memory data.swarm_best_likelihoods = ns.swarm_best_likelihoods data.particle_best_likelihoods = ns.particle_best_likelihoods",
"particles: p.thread.start() for p in particles: p.thread.join() def get_least_used_cores(n_cores): cpu_usage = psutil.cpu_percent(percpu=True) cores",
"docopt import docopt from datetime import datetime import multiprocessing import threading import psutil",
"x in range(helper.n_particles)] ns.iterations_performed = data.iterations_performed ns.stop = False ns.operations = [2,3] ns.attach",
"cores selected_cores = get_least_used_cores(helper.cores) assign_to_cores(os.getpid(), selected_cores) if n_particles == None: n_particles = helper.n_particles",
"(not used by default) -I truematrix Actual correct matrix, for algorithm testing (not",
"data, helper, ns, lock))) for p in processes: p.start() for p in processes:",
"processes assigned_numbers = [[] for i in range(helper.cores)] for i in range(n_particles): assigned_numbers[i%(helper.cores)].append(i)",
"helper.matrix, helper.cells, helper.mutation_number) if ns.best_swarm is None: ns.best_swarm = p.current_tree.copy() p.thread = threading.Thread(target",
"p = Particle(helper.cells, helper.mutation_number, helper.mutation_names, i) p.current_tree.likelihood = Tree.greedy_loglikelihood(p.current_tree, helper.matrix, helper.cells, helper.mutation_number) if",
"[-g gamma] [-t iterations] [-d max_deletions] [-e mutfile] [-T tolerance] [-m maxtime] [-I",
"from Particle import Particle from Tree import Tree from Data import Data import",
"ns.operations = [2,3] ns.attach = True if not helper.quiet: print(\"\\n • PSO RUNNING...\")",
"creating shared memory between processes manager = multiprocessing.Manager() assign_to_cores(manager._process.ident, selected_cores) lock = manager.Lock()",
"multiprocessing import threading import psutil def main(argv): arguments = docopt(__doc__, version = \"PSOSC-Cancer-Evolution",
"i in range(helper.cores): processes.append(multiprocessing.Process(target = start_threads, args = (selected_cores, assigned_numbers[i], data, helper, ns,",
"= \"results\" + datetime.now().strftime(\"%Y%m%d%H%M%S\") helper = Helper(arguments) if helper.multiple_runs: runs_data = [] for",
"p.run_iterations, args = (helper, ns, lock)) particles.append(p) for p in particles: p.thread.start() for",
"%s seconds\" % (data.n_particles, str(round(data.get_total_time(), 2)))) print(\"\\t- best likelihood: %s\\n\" % str(round(data.best.likelihood, 2)))",
"mutations [default: 1] -t iterations Number of iterations (-m argument will be ignored;",
"[] for i in range(helper.cores): processes.append(multiprocessing.Process(target = start_threads, args = (selected_cores, assigned_numbers[i], data,",
"-T tolerance Tolerance, minimum relative improvement (between 0 and 1) in the last",
"Data import Data import os import sys import time from docopt import docopt",
"--output output Limit the output (files created) to: (image | plots | text_file",
"base_dir) else: data = pso(helper) data.summary(helper, base_dir) def pso(helper, n_particles=None): # assigning process",
"named progressively from 1 to mutations (not used by default) -T tolerance Tolerance,",
"0.005] -m maxtime Maximum time (in seconds) of total PSOSC execution (not used",
"time (in seconds) of total PSOSC execution (not used by default) -I truematrix",
"to keep going, if iterations are not used [default: 0.005] -m maxtime Maximum",
"processes = [] for i in range(helper.cores): processes.append(multiprocessing.Process(target = start_threads, args = (selected_cores,",
"= (helper, ns, lock)) particles.append(p) for p in particles: p.thread.start() for p in",
"Time\\t\\t Best likelihood so far\") # creating and starting processes processes = []",
"selected_cores = get_least_used_cores(helper.cores) assign_to_cores(os.getpid(), selected_cores) if n_particles == None: n_particles = helper.n_particles if",
"Tree.greedy_loglikelihood(p.current_tree, helper.matrix, helper.cells, helper.mutation_number) if ns.best_swarm is None: ns.best_swarm = p.current_tree.copy() p.thread =",
"data.iterations_performed = ns.iterations_performed data.best = ns.best_swarm.copy() data.pso_end = time.time() if not helper.quiet: print(\"\\n",
"for i in assigned_numbers: p = Particle(helper.cells, helper.mutation_number, helper.mutation_names, i) p.current_tree.likelihood = Tree.greedy_loglikelihood(p.current_tree,",
"--version Options: -i infile Matrix input file -c cores Number of CPU cores",
"in range(n_particles): assigned_numbers[i%(helper.cores)].append(i) # coping data into shared memory ns.best_swarm = None ns.swarm_best_likelihoods",
"in particles: p.thread.start() for p in particles: p.thread.join() def get_least_used_cores(n_cores): cpu_usage = psutil.cpu_percent(percpu=True)",
"1) in the last iterations in order to keep going, if iterations are",
"coping data into shared memory ns.best_swarm = None ns.swarm_best_likelihoods = [] ns.particle_best_likelihoods =",
"2)))) print(\"\\t- best likelihood: %s\\n\" % str(round(data.best.likelihood, 2))) return data def start_threads(selected_cores, assigned_numbers,",
"for a multiple run); by default it is calculated proportionally to the size",
"of the file containing different FN rates for each mutations -b beta False",
"selected_cores) particles = [] for i in assigned_numbers: p = Particle(helper.cells, helper.mutation_number, helper.mutation_names,",
"process to cores selected_cores = get_least_used_cores(helper.cores) assign_to_cores(os.getpid(), selected_cores) if n_particles == None: n_particles",
"ns.particle_best_likelihoods data.iterations_performed = ns.iterations_performed data.best = ns.best_swarm.copy() data.pso_end = time.time() if not helper.quiet:",
"mutfile] [-T tolerance] [-m maxtime] [-I truematrix] [--quiet] [--output output] psosc.py --help psosc.py",
"import time from docopt import docopt from datetime import datetime import multiprocessing import",
"assigned_numbers, data, helper, ns, lock): assign_to_cores(os.getpid(), selected_cores) particles = [] for i in",
"-c cores Number of CPU cores -k k K value of Dollo(k) model",
"ns.iterations_performed data.best = ns.best_swarm.copy() data.pso_end = time.time() if not helper.quiet: print(\"\\n • FINAL",
"alpha) (-b beta) [-p particles] [-g gamma] [-t iterations] [-d max_deletions] [-e mutfile]",
"seconds) of total PSOSC execution (not used by default) -I truematrix Actual correct",
"+inf] -e mutfile Path of the mutation names. If not used, mutations will",
"print(\"\\t- best likelihood: %s\\n\" % str(round(data.best.likelihood, 2))) return data def start_threads(selected_cores, assigned_numbers, data,",
"import multiprocessing import threading import psutil def main(argv): arguments = docopt(__doc__, version =",
"iterations (-m argument will be ignored; not used by default) -d max_deletions Maximum",
"iterations Number of iterations (-m argument will be ignored; not used by default)",
"%d =======\" % (r+1)) run_dir = base_dir + \"/particles%d_run%d\" % (n_particles, (r+1)) if",
"if n_particles == None: n_particles = helper.n_particles if not helper.quiet: print(\"\\n • %d",
"if not helper.quiet: print(\"\\n • FINAL RESULTS\") print(\"\\t- time to complete pso with",
"i in range(n_cores): c = cpu_usage.index(min(cpu_usage)) cores.append(c) cpu_usage[c] = float(\"+inf\") return cores def",
"in the last iterations in order to keep going, if iterations are not",
"file containing different GAMMA rates for each mutations [default: 1] -t iterations Number",
"Particle from Tree import Tree from Data import Data import os import sys",
"| all) [default: all] \"\"\" from Helper import Helper from Particle import Particle",
"used [default: 0.005] -m maxtime Maximum time (in seconds) of total PSOSC execution",
"def get_least_used_cores(n_cores): cpu_usage = psutil.cpu_percent(percpu=True) cores = [] for i in range(n_cores): c",
"START-UP\" % (n_particles)) Tree.set_probabilities(helper.alpha, helper.beta) data = Data(helper.filename, n_particles, helper.output) data.pso_start = time.time()",
"1 to mutations (not used by default) -T tolerance Tolerance, minimum relative improvement",
"print anything (not used by default) --output output Limit the output (files created)",
"for i in range(helper.cores): processes.append(multiprocessing.Process(target = start_threads, args = (selected_cores, assigned_numbers[i], data, helper,",
"(between 0 and 1) in the last iterations in order to keep going,",
"\"results\" + datetime.now().strftime(\"%Y%m%d%H%M%S\") helper = Helper(arguments) if helper.multiple_runs: runs_data = [] for r,",
"None: ns.best_swarm = p.current_tree.copy() p.thread = threading.Thread(target = p.run_iterations, args = (helper, ns,",
"# creating shared memory between processes manager = multiprocessing.Manager() assign_to_cores(manager._process.ident, selected_cores) lock =",
"likelihood: %s\\n\" % str(round(data.best.likelihood, 2))) return data def start_threads(selected_cores, assigned_numbers, data, helper, ns,",
"file or path of the file containing different FN rates for each mutations",
"n_particles = helper.n_particles if not helper.quiet: print(\"\\n • %d PARTICLES START-UP\" % (n_particles))",
"cores Number of CPU cores -k k K value of Dollo(k) model used",
"(-a alpha) (-b beta) [-p particles] [-g gamma] [-t iterations] [-d max_deletions] [-e",
"the last iterations in order to keep going, if iterations are not used",
"in input file or path of the file containing different FN rates for",
"be ignored; not used by default) -d max_deletions Maximum number of total deletions",
"ns.best_swarm.copy() data.pso_end = time.time() if not helper.quiet: print(\"\\n • FINAL RESULTS\") print(\"\\t- time",
"docopt(__doc__, version = \"PSOSC-Cancer-Evolution 2.0\") base_dir = \"results\" + datetime.now().strftime(\"%Y%m%d%H%M%S\") helper = Helper(arguments)",
"[] for i in assigned_numbers: p = Particle(helper.cells, helper.mutation_number, helper.mutation_names, i) p.current_tree.likelihood =",
"for algorithm testing (not used by default) --quiet Doesn't print anything (not used",
"% (n_particles, (r+1)) if not os.path.exists(base_dir): os.makedirs(base_dir) data = pso(helper, n_particles) data.summary(helper, run_dir)",
"import docopt from datetime import datetime import multiprocessing import threading import psutil def",
"time to complete pso with %d particles: %s seconds\" % (data.n_particles, str(round(data.get_total_time(), 2))))",
"%d PARTICLES START-UP\" % (n_particles)) Tree.set_probabilities(helper.alpha, helper.beta) data = Data(helper.filename, n_particles, helper.output) data.pso_start",
"data.summary(helper, base_dir) def pso(helper, n_particles=None): # assigning process to cores selected_cores = get_least_used_cores(helper.cores)",
"print (\"\\n\\n======= Run number %d =======\" % (r+1)) run_dir = base_dir + \"/particles%d_run%d\"",
"run_dir = base_dir + \"/particles%d_run%d\" % (n_particles, (r+1)) if not os.path.exists(base_dir): os.makedirs(base_dir) data",
"% (r+1)) run_dir = base_dir + \"/particles%d_run%d\" % (n_particles, (r+1)) if not os.path.exists(base_dir):",
"k) (-a alpha) (-b beta) [-p particles] [-g gamma] [-t iterations] [-d max_deletions]",
"False ns.operations = [2,3] ns.attach = True if not helper.quiet: print(\"\\n • PSO",
"[default: 0.005] -m maxtime Maximum time (in seconds) of total PSOSC execution (not",
"cores -k k K value of Dollo(k) model used as phylogeny tree -a",
"print(\"\\n • FINAL RESULTS\") print(\"\\t- time to complete pso with %d particles: %s",
"= None ns.swarm_best_likelihoods = [] ns.particle_best_likelihoods = [[] for x in range(helper.n_particles)] ns.iterations_performed",
"n_particles=None): # assigning process to cores selected_cores = get_least_used_cores(helper.cores) assign_to_cores(os.getpid(), selected_cores) if n_particles",
"selected_cores) if n_particles == None: n_particles = helper.n_particles if not helper.quiet: print(\"\\n •",
"for p in particles: p.thread.join() def get_least_used_cores(n_cores): cpu_usage = psutil.cpu_percent(percpu=True) cores = []",
"memory ns.best_swarm = None ns.swarm_best_likelihoods = [] ns.particle_best_likelihoods = [[] for x in",
"= ns.best_swarm.copy() data.pso_end = time.time() if not helper.quiet: print(\"\\n • FINAL RESULTS\") print(\"\\t-",
"gamma Loss rate in input file or path of the file containing different",
"progressively from 1 to mutations (not used by default) -T tolerance Tolerance, minimum",
"import Tree from Data import Data import os import sys import time from",
"for p in particles: p.thread.start() for p in particles: p.thread.join() def get_least_used_cores(n_cores): cpu_usage",
"p.current_tree.likelihood = Tree.greedy_loglikelihood(p.current_tree, helper.matrix, helper.cells, helper.mutation_number) if ns.best_swarm is None: ns.best_swarm = p.current_tree.copy()",
"Tree.set_probabilities(helper.alpha, helper.beta) data = Data(helper.filename, n_particles, helper.output) data.pso_start = time.time() # creating shared",
"default) --output output Limit the output (files created) to: (image | plots |",
"truematrix] [--quiet] [--output output] psosc.py --help psosc.py --version Options: -i infile Matrix input",
"[] for i in range(n_cores): c = cpu_usage.index(min(cpu_usage)) cores.append(c) cpu_usage[c] = float(\"+inf\") return",
"input file or path of the file containing different FN rates for each",
"base_dir = \"results\" + datetime.now().strftime(\"%Y%m%d%H%M%S\") helper = Helper(arguments) if helper.multiple_runs: runs_data = []",
"data.pso_end = time.time() if not helper.quiet: print(\"\\n • FINAL RESULTS\") print(\"\\t- time to",
"% str(round(data.best.likelihood, 2))) return data def start_threads(selected_cores, assigned_numbers, data, helper, ns, lock): assign_to_cores(os.getpid(),",
"<filename>psosc.py \"\"\" Particle Swarm Optimization Single Cell inference Usage: psosc.py (-i infile) (-c",
"manager = multiprocessing.Manager() assign_to_cores(manager._process.ident, selected_cores) lock = manager.Lock() ns = manager.Namespace() # selecting",
"Usage: psosc.py (-i infile) (-c cores) (-k k) (-a alpha) (-b beta) [-p",
"-a alpha False negative rate in input file or path of the file",
"import os import sys import time from docopt import docopt from datetime import",
"= get_least_used_cores(helper.cores) assign_to_cores(os.getpid(), selected_cores) if n_particles == None: n_particles = helper.n_particles if not",
"(n_particles)) Tree.set_probabilities(helper.alpha, helper.beta) data = Data(helper.filename, n_particles, helper.output) data.pso_start = time.time() # creating",
"in assigned_numbers: p = Particle(helper.cells, helper.mutation_number, helper.mutation_names, i) p.current_tree.likelihood = Tree.greedy_loglikelihood(p.current_tree, helper.matrix, helper.cells,",
"max_deletions Maximum number of total deletions allowed [default: +inf] -e mutfile Path of",
"calculated proportionally to the size of the matrix -g gamma Loss rate in",
"(r+1)) run_dir = base_dir + \"/particles%d_run%d\" % (n_particles, (r+1)) if not os.path.exists(base_dir): os.makedirs(base_dir)",
"print(\"\\n • %d PARTICLES START-UP\" % (n_particles)) Tree.set_probabilities(helper.alpha, helper.beta) data = Data(helper.filename, n_particles,",
"helper.quiet: print(\"\\n • FINAL RESULTS\") print(\"\\t- time to complete pso with %d particles:",
"import threading import psutil def main(argv): arguments = docopt(__doc__, version = \"PSOSC-Cancer-Evolution 2.0\")",
"mutations (not used by default) -T tolerance Tolerance, minimum relative improvement (between 0",
"for i in range(n_cores): c = cpu_usage.index(min(cpu_usage)) cores.append(c) cpu_usage[c] = float(\"+inf\") return cores",
"os import sys import time from docopt import docopt from datetime import datetime",
"and starting processes processes = [] for i in range(helper.cores): processes.append(multiprocessing.Process(target = start_threads,",
"get_least_used_cores(helper.cores) assign_to_cores(os.getpid(), selected_cores) if n_particles == None: n_particles = helper.n_particles if not helper.quiet:",
"ns, lock): assign_to_cores(os.getpid(), selected_cores) particles = [] for i in assigned_numbers: p =",
"of the mutation names. If not used, mutations will be named progressively from",
"shared memory between processes manager = multiprocessing.Manager() assign_to_cores(manager._process.ident, selected_cores) lock = manager.Lock() ns",
"in range(helper.cores): processes.append(multiprocessing.Process(target = start_threads, args = (selected_cores, assigned_numbers[i], data, helper, ns, lock)))",
"sys import time from docopt import docopt from datetime import datetime import multiprocessing",
"% (n_particles)) Tree.set_probabilities(helper.alpha, helper.beta) data = Data(helper.filename, n_particles, helper.output) data.pso_start = time.time() #",
"ignored; not used by default) -d max_deletions Maximum number of total deletions allowed",
"iterations are not used [default: 0.005] -m maxtime Maximum time (in seconds) of",
"= p.current_tree.copy() p.thread = threading.Thread(target = p.run_iterations, args = (helper, ns, lock)) particles.append(p)",
"=======\" % (r+1)) run_dir = base_dir + \"/particles%d_run%d\" % (n_particles, (r+1)) if not",
"\"/particles%d_run%d\" % (n_particles, (r+1)) if not os.path.exists(base_dir): os.makedirs(base_dir) data = pso(helper, n_particles) data.summary(helper,",
"= psutil.cpu_percent(percpu=True) cores = [] for i in range(n_cores): c = cpu_usage.index(min(cpu_usage)) cores.append(c)",
"Number of particles (single or multiple values, separated by commas, for a multiple",
"datetime import multiprocessing import threading import psutil def main(argv): arguments = docopt(__doc__, version",
"and 1) in the last iterations in order to keep going, if iterations",
"p.thread.join() def get_least_used_cores(n_cores): cpu_usage = psutil.cpu_percent(percpu=True) cores = [] for i in range(n_cores):",
"n_particles == None: n_particles = helper.n_particles if not helper.quiet: print(\"\\n • %d PARTICLES",
"of the matrix -g gamma Loss rate in input file or path of",
"Maximum number of total deletions allowed [default: +inf] -e mutfile Path of the",
"keep going, if iterations are not used [default: 0.005] -m maxtime Maximum time",
"[-m maxtime] [-I truematrix] [--quiet] [--output output] psosc.py --help psosc.py --version Options: -i",
"is None: ns.best_swarm = p.current_tree.copy() p.thread = threading.Thread(target = p.run_iterations, args = (helper,",
"else: data = pso(helper) data.summary(helper, base_dir) def pso(helper, n_particles=None): # assigning process to",
"p.current_tree.copy() p.thread = threading.Thread(target = p.run_iterations, args = (helper, ns, lock)) particles.append(p) for",
"run); by default it is calculated proportionally to the size of the matrix",
"= [2,3] ns.attach = True if not helper.quiet: print(\"\\n • PSO RUNNING...\") print(\"\\t",
"# creating and starting processes processes = [] for i in range(helper.cores): processes.append(multiprocessing.Process(target",
"base_dir + \"/particles%d_run%d\" % (n_particles, (r+1)) if not os.path.exists(base_dir): os.makedirs(base_dir) data = pso(helper,",
"(-k k) (-a alpha) (-b beta) [-p particles] [-g gamma] [-t iterations] [-d",
"used by default) --quiet Doesn't print anything (not used by default) --output output",
"complete pso with %d particles: %s seconds\" % (data.n_particles, str(round(data.get_total_time(), 2)))) print(\"\\t- best",
"Limit the output (files created) to: (image | plots | text_file | all)",
"total PSOSC execution (not used by default) -I truematrix Actual correct matrix, for",
"max_deletions] [-e mutfile] [-T tolerance] [-m maxtime] [-I truematrix] [--quiet] [--output output] psosc.py",
"by default) --quiet Doesn't print anything (not used by default) --output output Limit",
"import Data import os import sys import time from docopt import docopt from",
"relative improvement (between 0 and 1) in the last iterations in order to",
"helper.mutation_number) if ns.best_swarm is None: ns.best_swarm = p.current_tree.copy() p.thread = threading.Thread(target = p.run_iterations,",
"from Helper import Helper from Particle import Particle from Tree import Tree from",
"i in range(n_particles): assigned_numbers[i%(helper.cores)].append(i) # coping data into shared memory ns.best_swarm = None",
"testing (not used by default) --quiet Doesn't print anything (not used by default)",
"(-i infile) (-c cores) (-k k) (-a alpha) (-b beta) [-p particles] [-g",
"particles Number of particles (single or multiple values, separated by commas, for a",
"created) to: (image | plots | text_file | all) [default: all] \"\"\" from",
"def main(argv): arguments = docopt(__doc__, version = \"PSOSC-Cancer-Evolution 2.0\") base_dir = \"results\" +",
"cores = [] for i in range(n_cores): c = cpu_usage.index(min(cpu_usage)) cores.append(c) cpu_usage[c] =",
"helper.multiple_runs: runs_data = [] for r, n_particles in enumerate(helper.n_particles): print (\"\\n\\n======= Run number",
"n_particles in enumerate(helper.n_particles): print (\"\\n\\n======= Run number %d =======\" % (r+1)) run_dir =",
"assign_to_cores(os.getpid(), selected_cores) particles = [] for i in assigned_numbers: p = Particle(helper.cells, helper.mutation_number,",
"\"\"\" from Helper import Helper from Particle import Particle from Tree import Tree",
"psosc.py (-i infile) (-c cores) (-k k) (-a alpha) (-b beta) [-p particles]",
"= multiprocessing.Manager() assign_to_cores(manager._process.ident, selected_cores) lock = manager.Lock() ns = manager.Namespace() # selecting particles",
"-k k K value of Dollo(k) model used as phylogeny tree -a alpha",
"(not used by default) -T tolerance Tolerance, minimum relative improvement (between 0 and",
"processes: p.join() # copying back data from shared memory data.swarm_best_likelihoods = ns.swarm_best_likelihoods data.particle_best_likelihoods",
"cpu_usage = psutil.cpu_percent(percpu=True) cores = [] for i in range(n_cores): c = cpu_usage.index(min(cpu_usage))",
"Helper(arguments) if helper.multiple_runs: runs_data = [] for r, n_particles in enumerate(helper.n_particles): print (\"\\n\\n=======",
"separated by commas, for a multiple run); by default it is calculated proportionally",
"the matrix -g gamma Loss rate in input file or path of the",
"if not helper.quiet: print(\"\\n • PSO RUNNING...\") print(\"\\t Time\\t\\t Best likelihood so far\")",
"| text_file | all) [default: all] \"\"\" from Helper import Helper from Particle",
"assigned_numbers = [[] for i in range(helper.cores)] for i in range(n_particles): assigned_numbers[i%(helper.cores)].append(i) #",
"psutil.cpu_percent(percpu=True) cores = [] for i in range(n_cores): c = cpu_usage.index(min(cpu_usage)) cores.append(c) cpu_usage[c]",
"ns.iterations_performed = data.iterations_performed ns.stop = False ns.operations = [2,3] ns.attach = True if",
"True if not helper.quiet: print(\"\\n • PSO RUNNING...\") print(\"\\t Time\\t\\t Best likelihood so",
"for x in range(helper.n_particles)] ns.iterations_performed = data.iterations_performed ns.stop = False ns.operations = [2,3]",
"it is calculated proportionally to the size of the matrix -g gamma Loss",
"psutil def main(argv): arguments = docopt(__doc__, version = \"PSOSC-Cancer-Evolution 2.0\") base_dir = \"results\"",
"Tree from Data import Data import os import sys import time from docopt",
"• PSO RUNNING...\") print(\"\\t Time\\t\\t Best likelihood so far\") # creating and starting",
"rates for each mutations [default: 1] -t iterations Number of iterations (-m argument",
"Single Cell inference Usage: psosc.py (-i infile) (-c cores) (-k k) (-a alpha)",
"size of the matrix -g gamma Loss rate in input file or path",
"input file or path of the file containing different GAMMA rates for each",
"or path of the file containing different GAMMA rates for each mutations [default:",
"to complete pso with %d particles: %s seconds\" % (data.n_particles, str(round(data.get_total_time(), 2)))) print(\"\\t-",
"infile Matrix input file -c cores Number of CPU cores -k k K",
"= (selected_cores, assigned_numbers[i], data, helper, ns, lock))) for p in processes: p.start() for",
"for p in processes: p.start() for p in processes: p.join() # copying back",
"r, n_particles in enumerate(helper.n_particles): print (\"\\n\\n======= Run number %d =======\" % (r+1)) run_dir",
"# assigning process to cores selected_cores = get_least_used_cores(helper.cores) assign_to_cores(os.getpid(), selected_cores) if n_particles ==",
"c = cpu_usage.index(min(cpu_usage)) cores.append(c) cpu_usage[c] = float(\"+inf\") return cores def assign_to_cores(pid, cores): proc",
"\"PSOSC-Cancer-Evolution 2.0\") base_dir = \"results\" + datetime.now().strftime(\"%Y%m%d%H%M%S\") helper = Helper(arguments) if helper.multiple_runs: runs_data",
"going, if iterations are not used [default: 0.005] -m maxtime Maximum time (in",
"Data(helper.filename, n_particles, helper.output) data.pso_start = time.time() # creating shared memory between processes manager",
"to: (image | plots | text_file | all) [default: all] \"\"\" from Helper",
"commas, for a multiple run); by default it is calculated proportionally to the",
"[-e mutfile] [-T tolerance] [-m maxtime] [-I truematrix] [--quiet] [--output output] psosc.py --help",
"Data.runs_summary(helper.n_particles, runs_data, base_dir) else: data = pso(helper) data.summary(helper, base_dir) def pso(helper, n_particles=None): #",
"likelihood so far\") # creating and starting processes processes = [] for i",
"= Particle(helper.cells, helper.mutation_number, helper.mutation_names, i) p.current_tree.likelihood = Tree.greedy_loglikelihood(p.current_tree, helper.matrix, helper.cells, helper.mutation_number) if ns.best_swarm",
"Number of CPU cores -k k K value of Dollo(k) model used as",
"% (data.n_particles, str(round(data.get_total_time(), 2)))) print(\"\\t- best likelihood: %s\\n\" % str(round(data.best.likelihood, 2))) return data",
"Loss rate in input file or path of the file containing different GAMMA",
"(-b beta) [-p particles] [-g gamma] [-t iterations] [-d max_deletions] [-e mutfile] [-T",
"is calculated proportionally to the size of the matrix -g gamma Loss rate",
"different GAMMA rates for each mutations [default: 1] -t iterations Number of iterations",
"print(\"\\t- time to complete pso with %d particles: %s seconds\" % (data.n_particles, str(round(data.get_total_time(),",
"mutfile Path of the mutation names. If not used, mutations will be named",
"data.iterations_performed ns.stop = False ns.operations = [2,3] ns.attach = True if not helper.quiet:",
"= ns.iterations_performed data.best = ns.best_swarm.copy() data.pso_end = time.time() if not helper.quiet: print(\"\\n •",
"for each mutations -b beta False positive rate -p particles Number of particles",
"helper.n_particles if not helper.quiet: print(\"\\n • %d PARTICLES START-UP\" % (n_particles)) Tree.set_probabilities(helper.alpha, helper.beta)",
"-I truematrix Actual correct matrix, for algorithm testing (not used by default) --quiet",
"as phylogeny tree -a alpha False negative rate in input file or path",
"selected_cores) lock = manager.Lock() ns = manager.Namespace() # selecting particles to assign to",
"= cpu_usage.index(min(cpu_usage)) cores.append(c) cpu_usage[c] = float(\"+inf\") return cores def assign_to_cores(pid, cores): proc =",
"= [] for r, n_particles in enumerate(helper.n_particles): print (\"\\n\\n======= Run number %d =======\"",
"if not helper.quiet: print(\"\\n • %d PARTICLES START-UP\" % (n_particles)) Tree.set_probabilities(helper.alpha, helper.beta) data",
"far\") # creating and starting processes processes = [] for i in range(helper.cores):",
"processes manager = multiprocessing.Manager() assign_to_cores(manager._process.ident, selected_cores) lock = manager.Lock() ns = manager.Namespace() #",
"in range(helper.cores)] for i in range(n_particles): assigned_numbers[i%(helper.cores)].append(i) # coping data into shared memory",
"algorithm testing (not used by default) --quiet Doesn't print anything (not used by",
"in processes: p.start() for p in processes: p.join() # copying back data from",
"infile) (-c cores) (-k k) (-a alpha) (-b beta) [-p particles] [-g gamma]",
"Doesn't print anything (not used by default) --output output Limit the output (files",
"runs_data.append(data) Data.runs_summary(helper.n_particles, runs_data, base_dir) else: data = pso(helper) data.summary(helper, base_dir) def pso(helper, n_particles=None):",
"assigned_numbers[i], data, helper, ns, lock))) for p in processes: p.start() for p in",
"used by default) -T tolerance Tolerance, minimum relative improvement (between 0 and 1)",
"in order to keep going, if iterations are not used [default: 0.005] -m",
"p.thread.start() for p in particles: p.thread.join() def get_least_used_cores(n_cores): cpu_usage = psutil.cpu_percent(percpu=True) cores =",
"for p in processes: p.join() # copying back data from shared memory data.swarm_best_likelihoods",
"of the file containing different GAMMA rates for each mutations [default: 1] -t",
"| plots | text_file | all) [default: all] \"\"\" from Helper import Helper",
"= ns.swarm_best_likelihoods data.particle_best_likelihoods = ns.particle_best_likelihoods data.iterations_performed = ns.iterations_performed data.best = ns.best_swarm.copy() data.pso_end =",
"-e mutfile Path of the mutation names. If not used, mutations will be",
"[[] for i in range(helper.cores)] for i in range(n_particles): assigned_numbers[i%(helper.cores)].append(i) # coping data",
"None ns.swarm_best_likelihoods = [] ns.particle_best_likelihoods = [[] for x in range(helper.n_particles)] ns.iterations_performed =",
"2.0\") base_dir = \"results\" + datetime.now().strftime(\"%Y%m%d%H%M%S\") helper = Helper(arguments) if helper.multiple_runs: runs_data =",
"not helper.quiet: print(\"\\n • FINAL RESULTS\") print(\"\\t- time to complete pso with %d",
"return data def start_threads(selected_cores, assigned_numbers, data, helper, ns, lock): assign_to_cores(os.getpid(), selected_cores) particles =",
"assigning process to cores selected_cores = get_least_used_cores(helper.cores) assign_to_cores(os.getpid(), selected_cores) if n_particles == None:",
"= \"PSOSC-Cancer-Evolution 2.0\") base_dir = \"results\" + datetime.now().strftime(\"%Y%m%d%H%M%S\") helper = Helper(arguments) if helper.multiple_runs:",
"Data import os import sys import time from docopt import docopt from datetime",
"= pso(helper) data.summary(helper, base_dir) def pso(helper, n_particles=None): # assigning process to cores selected_cores",
"-g gamma Loss rate in input file or path of the file containing",
"start_threads(selected_cores, assigned_numbers, data, helper, ns, lock): assign_to_cores(os.getpid(), selected_cores) particles = [] for i",
"helper.mutation_number, helper.mutation_names, i) p.current_tree.likelihood = Tree.greedy_loglikelihood(p.current_tree, helper.matrix, helper.cells, helper.mutation_number) if ns.best_swarm is None:",
"memory between processes manager = multiprocessing.Manager() assign_to_cores(manager._process.ident, selected_cores) lock = manager.Lock() ns =",
"output Limit the output (files created) to: (image | plots | text_file |",
"(r+1)) if not os.path.exists(base_dir): os.makedirs(base_dir) data = pso(helper, n_particles) data.summary(helper, run_dir) runs_data.append(data) Data.runs_summary(helper.n_particles,",
"= False ns.operations = [2,3] ns.attach = True if not helper.quiet: print(\"\\n •",
"[--quiet] [--output output] psosc.py --help psosc.py --version Options: -i infile Matrix input file",
"gamma] [-t iterations] [-d max_deletions] [-e mutfile] [-T tolerance] [-m maxtime] [-I truematrix]",
"phylogeny tree -a alpha False negative rate in input file or path of",
"values, separated by commas, for a multiple run); by default it is calculated",
"ns.swarm_best_likelihoods = [] ns.particle_best_likelihoods = [[] for x in range(helper.n_particles)] ns.iterations_performed = data.iterations_performed",
"shared memory ns.best_swarm = None ns.swarm_best_likelihoods = [] ns.particle_best_likelihoods = [[] for x",
"= manager.Lock() ns = manager.Namespace() # selecting particles to assign to processes assigned_numbers",
"%d particles: %s seconds\" % (data.n_particles, str(round(data.get_total_time(), 2)))) print(\"\\t- best likelihood: %s\\n\" %",
"positive rate -p particles Number of particles (single or multiple values, separated by",
"helper.cells, helper.mutation_number) if ns.best_swarm is None: ns.best_swarm = p.current_tree.copy() p.thread = threading.Thread(target =",
"= [] ns.particle_best_likelihoods = [[] for x in range(helper.n_particles)] ns.iterations_performed = data.iterations_performed ns.stop",
"multiple values, separated by commas, for a multiple run); by default it is",
"(single or multiple values, separated by commas, for a multiple run); by default",
"assigned_numbers[i%(helper.cores)].append(i) # coping data into shared memory ns.best_swarm = None ns.swarm_best_likelihoods = []",
"iterations in order to keep going, if iterations are not used [default: 0.005]",
"range(n_particles): assigned_numbers[i%(helper.cores)].append(i) # coping data into shared memory ns.best_swarm = None ns.swarm_best_likelihoods =",
"args = (helper, ns, lock)) particles.append(p) for p in particles: p.thread.start() for p",
"default) -T tolerance Tolerance, minimum relative improvement (between 0 and 1) in the",
"tolerance] [-m maxtime] [-I truematrix] [--quiet] [--output output] psosc.py --help psosc.py --version Options:",
"path of the file containing different FN rates for each mutations -b beta",
"input file -c cores Number of CPU cores -k k K value of",
"def pso(helper, n_particles=None): # assigning process to cores selected_cores = get_least_used_cores(helper.cores) assign_to_cores(os.getpid(), selected_cores)",
"shared memory data.swarm_best_likelihoods = ns.swarm_best_likelihoods data.particle_best_likelihoods = ns.particle_best_likelihoods data.iterations_performed = ns.iterations_performed data.best =",
"import Helper from Particle import Particle from Tree import Tree from Data import",
"= data.iterations_performed ns.stop = False ns.operations = [2,3] ns.attach = True if not",
"datetime.now().strftime(\"%Y%m%d%H%M%S\") helper = Helper(arguments) if helper.multiple_runs: runs_data = [] for r, n_particles in",
"will be ignored; not used by default) -d max_deletions Maximum number of total",
"by default) -T tolerance Tolerance, minimum relative improvement (between 0 and 1) in",
"p in particles: p.thread.start() for p in particles: p.thread.join() def get_least_used_cores(n_cores): cpu_usage =",
"PSO RUNNING...\") print(\"\\t Time\\t\\t Best likelihood so far\") # creating and starting processes",
"1] -t iterations Number of iterations (-m argument will be ignored; not used",
"datetime import datetime import multiprocessing import threading import psutil def main(argv): arguments =",
"data, helper, ns, lock): assign_to_cores(os.getpid(), selected_cores) particles = [] for i in assigned_numbers:",
"assign to processes assigned_numbers = [[] for i in range(helper.cores)] for i in",
"Actual correct matrix, for algorithm testing (not used by default) --quiet Doesn't print",
"of iterations (-m argument will be ignored; not used by default) -d max_deletions",
"will be named progressively from 1 to mutations (not used by default) -T",
"allowed [default: +inf] -e mutfile Path of the mutation names. If not used,",
"[-T tolerance] [-m maxtime] [-I truematrix] [--quiet] [--output output] psosc.py --help psosc.py --version",
"between processes manager = multiprocessing.Manager() assign_to_cores(manager._process.ident, selected_cores) lock = manager.Lock() ns = manager.Namespace()",
"--help psosc.py --version Options: -i infile Matrix input file -c cores Number of",
"used by default) --output output Limit the output (files created) to: (image |",
"particles = [] for i in assigned_numbers: p = Particle(helper.cells, helper.mutation_number, helper.mutation_names, i)",
"time from docopt import docopt from datetime import datetime import multiprocessing import threading",
"particles.append(p) for p in particles: p.thread.start() for p in particles: p.thread.join() def get_least_used_cores(n_cores):",
"multiple run); by default it is calculated proportionally to the size of the",
"pso(helper) data.summary(helper, base_dir) def pso(helper, n_particles=None): # assigning process to cores selected_cores =",
"= threading.Thread(target = p.run_iterations, args = (helper, ns, lock)) particles.append(p) for p in",
"range(n_cores): c = cpu_usage.index(min(cpu_usage)) cores.append(c) cpu_usage[c] = float(\"+inf\") return cores def assign_to_cores(pid, cores):",
"False negative rate in input file or path of the file containing different",
"pso with %d particles: %s seconds\" % (data.n_particles, str(round(data.get_total_time(), 2)))) print(\"\\t- best likelihood:",
"used, mutations will be named progressively from 1 to mutations (not used by",
"not os.path.exists(base_dir): os.makedirs(base_dir) data = pso(helper, n_particles) data.summary(helper, run_dir) runs_data.append(data) Data.runs_summary(helper.n_particles, runs_data, base_dir)",
"order to keep going, if iterations are not used [default: 0.005] -m maxtime",
"run_dir) runs_data.append(data) Data.runs_summary(helper.n_particles, runs_data, base_dir) else: data = pso(helper) data.summary(helper, base_dir) def pso(helper,",
"inference Usage: psosc.py (-i infile) (-c cores) (-k k) (-a alpha) (-b beta)",
"data = pso(helper, n_particles) data.summary(helper, run_dir) runs_data.append(data) Data.runs_summary(helper.n_particles, runs_data, base_dir) else: data =",
"cpu_usage[c] = float(\"+inf\") return cores def assign_to_cores(pid, cores): proc = psutil.Process(pid) proc.cpu_affinity(cores) if",
"value of Dollo(k) model used as phylogeny tree -a alpha False negative rate",
"import psutil def main(argv): arguments = docopt(__doc__, version = \"PSOSC-Cancer-Evolution 2.0\") base_dir =",
"time.time() # creating shared memory between processes manager = multiprocessing.Manager() assign_to_cores(manager._process.ident, selected_cores) lock",
"argument will be ignored; not used by default) -d max_deletions Maximum number of",
"cpu_usage.index(min(cpu_usage)) cores.append(c) cpu_usage[c] = float(\"+inf\") return cores def assign_to_cores(pid, cores): proc = psutil.Process(pid)",
"False positive rate -p particles Number of particles (single or multiple values, separated",
"different FN rates for each mutations -b beta False positive rate -p particles",
"float(\"+inf\") return cores def assign_to_cores(pid, cores): proc = psutil.Process(pid) proc.cpu_affinity(cores) if __name__ ==",
"Swarm Optimization Single Cell inference Usage: psosc.py (-i infile) (-c cores) (-k k)",
"import sys import time from docopt import docopt from datetime import datetime import",
"default) -I truematrix Actual correct matrix, for algorithm testing (not used by default)",
"(-c cores) (-k k) (-a alpha) (-b beta) [-p particles] [-g gamma] [-t",
"default) --quiet Doesn't print anything (not used by default) --output output Limit the",
"all) [default: all] \"\"\" from Helper import Helper from Particle import Particle from",
"docopt from datetime import datetime import multiprocessing import threading import psutil def main(argv):",
"= pso(helper, n_particles) data.summary(helper, run_dir) runs_data.append(data) Data.runs_summary(helper.n_particles, runs_data, base_dir) else: data = pso(helper)",
"= Tree.greedy_loglikelihood(p.current_tree, helper.matrix, helper.cells, helper.mutation_number) if ns.best_swarm is None: ns.best_swarm = p.current_tree.copy() p.thread",
"ns.best_swarm = p.current_tree.copy() p.thread = threading.Thread(target = p.run_iterations, args = (helper, ns, lock))",
"[--output output] psosc.py --help psosc.py --version Options: -i infile Matrix input file -c",
"manager.Namespace() # selecting particles to assign to processes assigned_numbers = [[] for i",
"time.time() if not helper.quiet: print(\"\\n • FINAL RESULTS\") print(\"\\t- time to complete pso",
"[] ns.particle_best_likelihoods = [[] for x in range(helper.n_particles)] ns.iterations_performed = data.iterations_performed ns.stop =",
"PSOSC execution (not used by default) -I truematrix Actual correct matrix, for algorithm",
"# coping data into shared memory ns.best_swarm = None ns.swarm_best_likelihoods = [] ns.particle_best_likelihoods",
"particles] [-g gamma] [-t iterations] [-d max_deletions] [-e mutfile] [-T tolerance] [-m maxtime]",
"2))) return data def start_threads(selected_cores, assigned_numbers, data, helper, ns, lock): assign_to_cores(os.getpid(), selected_cores) particles",
"ns.particle_best_likelihoods = [[] for x in range(helper.n_particles)] ns.iterations_performed = data.iterations_performed ns.stop = False",
"the mutation names. If not used, mutations will be named progressively from 1",
"from docopt import docopt from datetime import datetime import multiprocessing import threading import",
"(in seconds) of total PSOSC execution (not used by default) -I truematrix Actual",
"default it is calculated proportionally to the size of the matrix -g gamma",
"[2,3] ns.attach = True if not helper.quiet: print(\"\\n • PSO RUNNING...\") print(\"\\t Time\\t\\t",
"Particle(helper.cells, helper.mutation_number, helper.mutation_names, i) p.current_tree.likelihood = Tree.greedy_loglikelihood(p.current_tree, helper.matrix, helper.cells, helper.mutation_number) if ns.best_swarm is",
"= [[] for i in range(helper.cores)] for i in range(n_particles): assigned_numbers[i%(helper.cores)].append(i) # coping",
"back data from shared memory data.swarm_best_likelihoods = ns.swarm_best_likelihoods data.particle_best_likelihoods = ns.particle_best_likelihoods data.iterations_performed =",
"path of the file containing different GAMMA rates for each mutations [default: 1]",
"if not os.path.exists(base_dir): os.makedirs(base_dir) data = pso(helper, n_particles) data.summary(helper, run_dir) runs_data.append(data) Data.runs_summary(helper.n_particles, runs_data,",
"Options: -i infile Matrix input file -c cores Number of CPU cores -k",
"(n_particles, (r+1)) if not os.path.exists(base_dir): os.makedirs(base_dir) data = pso(helper, n_particles) data.summary(helper, run_dir) runs_data.append(data)",
"or path of the file containing different FN rates for each mutations -b",
"= [] for i in assigned_numbers: p = Particle(helper.cells, helper.mutation_number, helper.mutation_names, i) p.current_tree.likelihood",
"[[] for x in range(helper.n_particles)] ns.iterations_performed = data.iterations_performed ns.stop = False ns.operations =",
"# selecting particles to assign to processes assigned_numbers = [[] for i in",
"(files created) to: (image | plots | text_file | all) [default: all] \"\"\"",
"minimum relative improvement (between 0 and 1) in the last iterations in order",
"helper = Helper(arguments) if helper.multiple_runs: runs_data = [] for r, n_particles in enumerate(helper.n_particles):",
"\"\"\" Particle Swarm Optimization Single Cell inference Usage: psosc.py (-i infile) (-c cores)",
"tolerance Tolerance, minimum relative improvement (between 0 and 1) in the last iterations",
"in enumerate(helper.n_particles): print (\"\\n\\n======= Run number %d =======\" % (r+1)) run_dir = base_dir",
"-p particles Number of particles (single or multiple values, separated by commas, for",
"cores.append(c) cpu_usage[c] = float(\"+inf\") return cores def assign_to_cores(pid, cores): proc = psutil.Process(pid) proc.cpu_affinity(cores)",
"= time.time() # creating shared memory between processes manager = multiprocessing.Manager() assign_to_cores(manager._process.ident, selected_cores)",
"= True if not helper.quiet: print(\"\\n • PSO RUNNING...\") print(\"\\t Time\\t\\t Best likelihood",
"runs_data, base_dir) else: data = pso(helper) data.summary(helper, base_dir) def pso(helper, n_particles=None): # assigning",
"used by default) -d max_deletions Maximum number of total deletions allowed [default: +inf]",
"ns.best_swarm = None ns.swarm_best_likelihoods = [] ns.particle_best_likelihoods = [[] for x in range(helper.n_particles)]",
"%s\\n\" % str(round(data.best.likelihood, 2))) return data def start_threads(selected_cores, assigned_numbers, data, helper, ns, lock):",
"maxtime Maximum time (in seconds) of total PSOSC execution (not used by default)",
"• FINAL RESULTS\") print(\"\\t- time to complete pso with %d particles: %s seconds\"",
"last iterations in order to keep going, if iterations are not used [default:",
"RUNNING...\") print(\"\\t Time\\t\\t Best likelihood so far\") # creating and starting processes processes",
"data from shared memory data.swarm_best_likelihoods = ns.swarm_best_likelihoods data.particle_best_likelihoods = ns.particle_best_likelihoods data.iterations_performed = ns.iterations_performed",
"enumerate(helper.n_particles): print (\"\\n\\n======= Run number %d =======\" % (r+1)) run_dir = base_dir +",
"lock = manager.Lock() ns = manager.Namespace() # selecting particles to assign to processes",
"execution (not used by default) -I truematrix Actual correct matrix, for algorithm testing",
"the size of the matrix -g gamma Loss rate in input file or",
"starting processes processes = [] for i in range(helper.cores): processes.append(multiprocessing.Process(target = start_threads, args",
"ns, lock))) for p in processes: p.start() for p in processes: p.join() #",
"containing different FN rates for each mutations -b beta False positive rate -p",
"helper.beta) data = Data(helper.filename, n_particles, helper.output) data.pso_start = time.time() # creating shared memory",
"ns.attach = True if not helper.quiet: print(\"\\n • PSO RUNNING...\") print(\"\\t Time\\t\\t Best",
"correct matrix, for algorithm testing (not used by default) --quiet Doesn't print anything",
"data.particle_best_likelihoods = ns.particle_best_likelihoods data.iterations_performed = ns.iterations_performed data.best = ns.best_swarm.copy() data.pso_end = time.time() if",
"matrix -g gamma Loss rate in input file or path of the file",
"+ datetime.now().strftime(\"%Y%m%d%H%M%S\") helper = Helper(arguments) if helper.multiple_runs: runs_data = [] for r, n_particles",
"-d max_deletions Maximum number of total deletions allowed [default: +inf] -e mutfile Path",
"iterations] [-d max_deletions] [-e mutfile] [-T tolerance] [-m maxtime] [-I truematrix] [--quiet] [--output",
"output] psosc.py --help psosc.py --version Options: -i infile Matrix input file -c cores",
"negative rate in input file or path of the file containing different FN",
"return cores def assign_to_cores(pid, cores): proc = psutil.Process(pid) proc.cpu_affinity(cores) if __name__ == \"__main__\":",
"truematrix Actual correct matrix, for algorithm testing (not used by default) --quiet Doesn't",
"to processes assigned_numbers = [[] for i in range(helper.cores)] for i in range(n_particles):",
"manager.Lock() ns = manager.Namespace() # selecting particles to assign to processes assigned_numbers =",
"if ns.best_swarm is None: ns.best_swarm = p.current_tree.copy() p.thread = threading.Thread(target = p.run_iterations, args",
"(helper, ns, lock)) particles.append(p) for p in particles: p.thread.start() for p in particles:",
"so far\") # creating and starting processes processes = [] for i in",
"improvement (between 0 and 1) in the last iterations in order to keep",
"CPU cores -k k K value of Dollo(k) model used as phylogeny tree",
"[default: all] \"\"\" from Helper import Helper from Particle import Particle from Tree",
"args = (selected_cores, assigned_numbers[i], data, helper, ns, lock))) for p in processes: p.start()",
"selecting particles to assign to processes assigned_numbers = [[] for i in range(helper.cores)]",
"tree -a alpha False negative rate in input file or path of the",
"runs_data = [] for r, n_particles in enumerate(helper.n_particles): print (\"\\n\\n======= Run number %d",
"particles: p.thread.join() def get_least_used_cores(n_cores): cpu_usage = psutil.cpu_percent(percpu=True) cores = [] for i in",
"used by default) -I truematrix Actual correct matrix, for algorithm testing (not used",
"ns = manager.Namespace() # selecting particles to assign to processes assigned_numbers = [[]",
"data.summary(helper, run_dir) runs_data.append(data) Data.runs_summary(helper.n_particles, runs_data, base_dir) else: data = pso(helper) data.summary(helper, base_dir) def",
"(not used by default) --output output Limit the output (files created) to: (image",
"plots | text_file | all) [default: all] \"\"\" from Helper import Helper from",
"Path of the mutation names. If not used, mutations will be named progressively",
"p in processes: p.join() # copying back data from shared memory data.swarm_best_likelihoods =",
"Particle Swarm Optimization Single Cell inference Usage: psosc.py (-i infile) (-c cores) (-k",
"mutations -b beta False positive rate -p particles Number of particles (single or",
"pso(helper, n_particles=None): # assigning process to cores selected_cores = get_least_used_cores(helper.cores) assign_to_cores(os.getpid(), selected_cores) if",
"to the size of the matrix -g gamma Loss rate in input file",
"== None: n_particles = helper.n_particles if not helper.quiet: print(\"\\n • %d PARTICLES START-UP\"",
"of total PSOSC execution (not used by default) -I truematrix Actual correct matrix,",
"Dollo(k) model used as phylogeny tree -a alpha False negative rate in input",
"range(helper.cores): processes.append(multiprocessing.Process(target = start_threads, args = (selected_cores, assigned_numbers[i], data, helper, ns, lock))) for",
"particles: %s seconds\" % (data.n_particles, str(round(data.get_total_time(), 2)))) print(\"\\t- best likelihood: %s\\n\" % str(round(data.best.likelihood,",
"k K value of Dollo(k) model used as phylogeny tree -a alpha False",
"FN rates for each mutations -b beta False positive rate -p particles Number",
"If not used, mutations will be named progressively from 1 to mutations (not",
"= Data(helper.filename, n_particles, helper.output) data.pso_start = time.time() # creating shared memory between processes",
"all] \"\"\" from Helper import Helper from Particle import Particle from Tree import",
"RESULTS\") print(\"\\t- time to complete pso with %d particles: %s seconds\" % (data.n_particles,",
"with %d particles: %s seconds\" % (data.n_particles, str(round(data.get_total_time(), 2)))) print(\"\\t- best likelihood: %s\\n\"",
"rate in input file or path of the file containing different FN rates",
"print(\"\\n • PSO RUNNING...\") print(\"\\t Time\\t\\t Best likelihood so far\") # creating and",
"= [[] for x in range(helper.n_particles)] ns.iterations_performed = data.iterations_performed ns.stop = False ns.operations",
"base_dir) def pso(helper, n_particles=None): # assigning process to cores selected_cores = get_least_used_cores(helper.cores) assign_to_cores(os.getpid(),",
"p.join() # copying back data from shared memory data.swarm_best_likelihoods = ns.swarm_best_likelihoods data.particle_best_likelihoods =",
"text_file | all) [default: all] \"\"\" from Helper import Helper from Particle import",
"main(argv): arguments = docopt(__doc__, version = \"PSOSC-Cancer-Evolution 2.0\") base_dir = \"results\" + datetime.now().strftime(\"%Y%m%d%H%M%S\")",
"Run number %d =======\" % (r+1)) run_dir = base_dir + \"/particles%d_run%d\" % (n_particles,",
"start_threads, args = (selected_cores, assigned_numbers[i], data, helper, ns, lock))) for p in processes:",
"a multiple run); by default it is calculated proportionally to the size of",
"helper.output) data.pso_start = time.time() # creating shared memory between processes manager = multiprocessing.Manager()",
"from 1 to mutations (not used by default) -T tolerance Tolerance, minimum relative",
"total deletions allowed [default: +inf] -e mutfile Path of the mutation names. If",
"p.thread = threading.Thread(target = p.run_iterations, args = (helper, ns, lock)) particles.append(p) for p",
"rate in input file or path of the file containing different GAMMA rates",
"K value of Dollo(k) model used as phylogeny tree -a alpha False negative",
"= base_dir + \"/particles%d_run%d\" % (n_particles, (r+1)) if not os.path.exists(base_dir): os.makedirs(base_dir) data =",
"file -c cores Number of CPU cores -k k K value of Dollo(k)",
"Number of iterations (-m argument will be ignored; not used by default) -d",
"[default: 1] -t iterations Number of iterations (-m argument will be ignored; not",
"not used by default) -d max_deletions Maximum number of total deletions allowed [default:",
"if helper.multiple_runs: runs_data = [] for r, n_particles in enumerate(helper.n_particles): print (\"\\n\\n======= Run",
"proportionally to the size of the matrix -g gamma Loss rate in input",
"(-m argument will be ignored; not used by default) -d max_deletions Maximum number",
"range(helper.n_particles)] ns.iterations_performed = data.iterations_performed ns.stop = False ns.operations = [2,3] ns.attach = True",
"number %d =======\" % (r+1)) run_dir = base_dir + \"/particles%d_run%d\" % (n_particles, (r+1))",
"[default: +inf] -e mutfile Path of the mutation names. If not used, mutations",
"maxtime] [-I truematrix] [--quiet] [--output output] psosc.py --help psosc.py --version Options: -i infile",
"= helper.n_particles if not helper.quiet: print(\"\\n • %d PARTICLES START-UP\" % (n_particles)) Tree.set_probabilities(helper.alpha,",
"= time.time() if not helper.quiet: print(\"\\n • FINAL RESULTS\") print(\"\\t- time to complete",
"or multiple values, separated by commas, for a multiple run); by default it",
"number of total deletions allowed [default: +inf] -e mutfile Path of the mutation",
"each mutations [default: 1] -t iterations Number of iterations (-m argument will be",
"threading import psutil def main(argv): arguments = docopt(__doc__, version = \"PSOSC-Cancer-Evolution 2.0\") base_dir",
"particles to assign to processes assigned_numbers = [[] for i in range(helper.cores)] for",
"-b beta False positive rate -p particles Number of particles (single or multiple",
"best likelihood: %s\\n\" % str(round(data.best.likelihood, 2))) return data def start_threads(selected_cores, assigned_numbers, data, helper,",
"ns.swarm_best_likelihoods data.particle_best_likelihoods = ns.particle_best_likelihoods data.iterations_performed = ns.iterations_performed data.best = ns.best_swarm.copy() data.pso_end = time.time()",
"(image | plots | text_file | all) [default: all] \"\"\" from Helper import",
"beta False positive rate -p particles Number of particles (single or multiple values,",
"assign_to_cores(manager._process.ident, selected_cores) lock = manager.Lock() ns = manager.Namespace() # selecting particles to assign",
"p.start() for p in processes: p.join() # copying back data from shared memory",
"(data.n_particles, str(round(data.get_total_time(), 2)))) print(\"\\t- best likelihood: %s\\n\" % str(round(data.best.likelihood, 2))) return data def",
"Tolerance, minimum relative improvement (between 0 and 1) in the last iterations in",
"in input file or path of the file containing different GAMMA rates for",
"os.makedirs(base_dir) data = pso(helper, n_particles) data.summary(helper, run_dir) runs_data.append(data) Data.runs_summary(helper.n_particles, runs_data, base_dir) else: data",
"n_particles) data.summary(helper, run_dir) runs_data.append(data) Data.runs_summary(helper.n_particles, runs_data, base_dir) else: data = pso(helper) data.summary(helper, base_dir)",
"for r, n_particles in enumerate(helper.n_particles): print (\"\\n\\n======= Run number %d =======\" % (r+1))",
"of particles (single or multiple values, separated by commas, for a multiple run);",
"from datetime import datetime import multiprocessing import threading import psutil def main(argv): arguments"
] |
[
"= model_class.objects.get(pk=validated_data_pk) except model_class.model.DoesNotExist: pass def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb):",
"__enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): pass class Meta: model = None",
"instance): pk = self._get_model_pk() if validated_data and isinstance(validated_data, dict) and pk in validated_data:",
"def _get_model_pk(self): if isinstance(self, serializers.ListSerializer): model = self.child.Meta.model else: model = self.Meta.model return",
"validated_data and isinstance(validated_data, dict) and pk in validated_data: try: instance = queryset.get(pk=validated_data.get(pk)) self.instance",
"except model_class.model.DoesNotExist: pass def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): pass class",
"BaseNestableMixin(serializers.ModelSerializer): def _get_model_pk(self): if isinstance(self, serializers.ListSerializer): model = self.child.Meta.model else: model = self.Meta.model",
"self.instance = instance except queryset.model.DoesNotExist: pass def _set_instance_from_existing(self, validated_data, instance): pk = self._get_model_pk()",
"if self.instance is not None: model_class = instance.__class__ if self.instance.pk != validated_data_pk: try:",
"is not None: model_class = instance.__class__ if self.instance.pk != validated_data_pk: try: self.instance =",
"= instance except queryset.model.DoesNotExist: pass def _set_instance_from_existing(self, validated_data, instance): pk = self._get_model_pk() if",
"dict) and pk in validated_data: try: instance = queryset.get(pk=validated_data.get(pk)) self.instance = instance except",
"None: model_class = instance.__class__ if self.instance.pk != validated_data_pk: try: self.instance = model_class.objects.get(pk=validated_data_pk) except",
"class BaseNestableMixin(serializers.ModelSerializer): def _get_model_pk(self): if isinstance(self, serializers.ListSerializer): model = self.child.Meta.model else: model =",
"validated_data, instance): pk = self._get_model_pk() if validated_data and isinstance(validated_data, dict) and pk in",
"rest_framework import serializers class BaseNestableMixin(serializers.ModelSerializer): def _get_model_pk(self): if isinstance(self, serializers.ListSerializer): model = self.child.Meta.model",
"validated_data, queryset): pk = self._get_model_pk() self.instance = None if validated_data and isinstance(validated_data, dict)",
"from rest_framework import serializers class BaseNestableMixin(serializers.ModelSerializer): def _get_model_pk(self): if isinstance(self, serializers.ListSerializer): model =",
"_set_instance_from_queryset(self, validated_data, queryset): pk = self._get_model_pk() self.instance = None if validated_data and isinstance(validated_data,",
"return model._meta.pk.attname def _set_instance_from_queryset(self, validated_data, queryset): pk = self._get_model_pk() self.instance = None if",
"= self._get_model_pk() if validated_data and isinstance(validated_data, dict) and pk in validated_data: validated_data_pk =",
"model_class.model.DoesNotExist: pass def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): pass class Meta:",
"self.instance is not None: model_class = instance.__class__ if self.instance.pk != validated_data_pk: try: self.instance",
"_get_model_pk(self): if isinstance(self, serializers.ListSerializer): model = self.child.Meta.model else: model = self.Meta.model return model._meta.pk.attname",
"= self.Meta.model return model._meta.pk.attname def _set_instance_from_queryset(self, validated_data, queryset): pk = self._get_model_pk() self.instance =",
"if validated_data and isinstance(validated_data, dict) and pk in validated_data: try: instance = queryset.get(pk=validated_data.get(pk))",
"def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): pass class Meta: model =",
"if validated_data and isinstance(validated_data, dict) and pk in validated_data: validated_data_pk = validated_data.get(pk) if",
"else: model = self.Meta.model return model._meta.pk.attname def _set_instance_from_queryset(self, validated_data, queryset): pk = self._get_model_pk()",
"except queryset.model.DoesNotExist: pass def _set_instance_from_existing(self, validated_data, instance): pk = self._get_model_pk() if validated_data and",
"and pk in validated_data: try: instance = queryset.get(pk=validated_data.get(pk)) self.instance = instance except queryset.model.DoesNotExist:",
"validated_data and isinstance(validated_data, dict) and pk in validated_data: validated_data_pk = validated_data.get(pk) if self.instance",
"if self.instance.pk != validated_data_pk: try: self.instance = model_class.objects.get(pk=validated_data_pk) except model_class.model.DoesNotExist: pass def __enter__(self):",
"queryset.get(pk=validated_data.get(pk)) self.instance = instance except queryset.model.DoesNotExist: pass def _set_instance_from_existing(self, validated_data, instance): pk =",
"pass def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): pass class Meta: model",
"= validated_data.get(pk) if self.instance is not None: model_class = instance.__class__ if self.instance.pk !=",
"and pk in validated_data: validated_data_pk = validated_data.get(pk) if self.instance is not None: model_class",
"pass def _set_instance_from_existing(self, validated_data, instance): pk = self._get_model_pk() if validated_data and isinstance(validated_data, dict)",
"instance = queryset.get(pk=validated_data.get(pk)) self.instance = instance except queryset.model.DoesNotExist: pass def _set_instance_from_existing(self, validated_data, instance):",
"validated_data: validated_data_pk = validated_data.get(pk) if self.instance is not None: model_class = instance.__class__ if",
"queryset): pk = self._get_model_pk() self.instance = None if validated_data and isinstance(validated_data, dict) and",
"pk in validated_data: validated_data_pk = validated_data.get(pk) if self.instance is not None: model_class =",
"pk = self._get_model_pk() self.instance = None if validated_data and isinstance(validated_data, dict) and pk",
"= self._get_model_pk() self.instance = None if validated_data and isinstance(validated_data, dict) and pk in",
"instance except queryset.model.DoesNotExist: pass def _set_instance_from_existing(self, validated_data, instance): pk = self._get_model_pk() if validated_data",
"and isinstance(validated_data, dict) and pk in validated_data: try: instance = queryset.get(pk=validated_data.get(pk)) self.instance =",
"in validated_data: validated_data_pk = validated_data.get(pk) if self.instance is not None: model_class = instance.__class__",
"self.instance = None if validated_data and isinstance(validated_data, dict) and pk in validated_data: try:",
"self.child.Meta.model else: model = self.Meta.model return model._meta.pk.attname def _set_instance_from_queryset(self, validated_data, queryset): pk =",
"= self.child.Meta.model else: model = self.Meta.model return model._meta.pk.attname def _set_instance_from_queryset(self, validated_data, queryset): pk",
"None if validated_data and isinstance(validated_data, dict) and pk in validated_data: try: instance =",
"isinstance(self, serializers.ListSerializer): model = self.child.Meta.model else: model = self.Meta.model return model._meta.pk.attname def _set_instance_from_queryset(self,",
"self.instance = model_class.objects.get(pk=validated_data_pk) except model_class.model.DoesNotExist: pass def __enter__(self): pass def __exit__(self, exc_type, exc_val,",
"= queryset.get(pk=validated_data.get(pk)) self.instance = instance except queryset.model.DoesNotExist: pass def _set_instance_from_existing(self, validated_data, instance): pk",
"serializers class BaseNestableMixin(serializers.ModelSerializer): def _get_model_pk(self): if isinstance(self, serializers.ListSerializer): model = self.child.Meta.model else: model",
"validated_data_pk = validated_data.get(pk) if self.instance is not None: model_class = instance.__class__ if self.instance.pk",
"serializers.ListSerializer): model = self.child.Meta.model else: model = self.Meta.model return model._meta.pk.attname def _set_instance_from_queryset(self, validated_data,",
"isinstance(validated_data, dict) and pk in validated_data: validated_data_pk = validated_data.get(pk) if self.instance is not",
"import serializers class BaseNestableMixin(serializers.ModelSerializer): def _get_model_pk(self): if isinstance(self, serializers.ListSerializer): model = self.child.Meta.model else:",
"= instance.__class__ if self.instance.pk != validated_data_pk: try: self.instance = model_class.objects.get(pk=validated_data_pk) except model_class.model.DoesNotExist: pass",
"in validated_data: try: instance = queryset.get(pk=validated_data.get(pk)) self.instance = instance except queryset.model.DoesNotExist: pass def",
"self.instance.pk != validated_data_pk: try: self.instance = model_class.objects.get(pk=validated_data_pk) except model_class.model.DoesNotExist: pass def __enter__(self): pass",
"self.Meta.model return model._meta.pk.attname def _set_instance_from_queryset(self, validated_data, queryset): pk = self._get_model_pk() self.instance = None",
"!= validated_data_pk: try: self.instance = model_class.objects.get(pk=validated_data_pk) except model_class.model.DoesNotExist: pass def __enter__(self): pass def",
"def _set_instance_from_queryset(self, validated_data, queryset): pk = self._get_model_pk() self.instance = None if validated_data and",
"pk = self._get_model_pk() if validated_data and isinstance(validated_data, dict) and pk in validated_data: validated_data_pk",
"if isinstance(self, serializers.ListSerializer): model = self.child.Meta.model else: model = self.Meta.model return model._meta.pk.attname def",
"= None if validated_data and isinstance(validated_data, dict) and pk in validated_data: try: instance",
"queryset.model.DoesNotExist: pass def _set_instance_from_existing(self, validated_data, instance): pk = self._get_model_pk() if validated_data and isinstance(validated_data,",
"instance.__class__ if self.instance.pk != validated_data_pk: try: self.instance = model_class.objects.get(pk=validated_data_pk) except model_class.model.DoesNotExist: pass def",
"model_class.objects.get(pk=validated_data_pk) except model_class.model.DoesNotExist: pass def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): pass",
"model_class = instance.__class__ if self.instance.pk != validated_data_pk: try: self.instance = model_class.objects.get(pk=validated_data_pk) except model_class.model.DoesNotExist:",
"and isinstance(validated_data, dict) and pk in validated_data: validated_data_pk = validated_data.get(pk) if self.instance is",
"def _set_instance_from_existing(self, validated_data, instance): pk = self._get_model_pk() if validated_data and isinstance(validated_data, dict) and",
"pk in validated_data: try: instance = queryset.get(pk=validated_data.get(pk)) self.instance = instance except queryset.model.DoesNotExist: pass",
"model = self.child.Meta.model else: model = self.Meta.model return model._meta.pk.attname def _set_instance_from_queryset(self, validated_data, queryset):",
"not None: model_class = instance.__class__ if self.instance.pk != validated_data_pk: try: self.instance = model_class.objects.get(pk=validated_data_pk)",
"try: self.instance = model_class.objects.get(pk=validated_data_pk) except model_class.model.DoesNotExist: pass def __enter__(self): pass def __exit__(self, exc_type,",
"model._meta.pk.attname def _set_instance_from_queryset(self, validated_data, queryset): pk = self._get_model_pk() self.instance = None if validated_data",
"<reponame>promoteinternational/drf-nested from rest_framework import serializers class BaseNestableMixin(serializers.ModelSerializer): def _get_model_pk(self): if isinstance(self, serializers.ListSerializer): model",
"model = self.Meta.model return model._meta.pk.attname def _set_instance_from_queryset(self, validated_data, queryset): pk = self._get_model_pk() self.instance",
"isinstance(validated_data, dict) and pk in validated_data: try: instance = queryset.get(pk=validated_data.get(pk)) self.instance = instance",
"_set_instance_from_existing(self, validated_data, instance): pk = self._get_model_pk() if validated_data and isinstance(validated_data, dict) and pk",
"validated_data.get(pk) if self.instance is not None: model_class = instance.__class__ if self.instance.pk != validated_data_pk:",
"validated_data: try: instance = queryset.get(pk=validated_data.get(pk)) self.instance = instance except queryset.model.DoesNotExist: pass def _set_instance_from_existing(self,",
"dict) and pk in validated_data: validated_data_pk = validated_data.get(pk) if self.instance is not None:",
"try: instance = queryset.get(pk=validated_data.get(pk)) self.instance = instance except queryset.model.DoesNotExist: pass def _set_instance_from_existing(self, validated_data,",
"self._get_model_pk() self.instance = None if validated_data and isinstance(validated_data, dict) and pk in validated_data:",
"validated_data_pk: try: self.instance = model_class.objects.get(pk=validated_data_pk) except model_class.model.DoesNotExist: pass def __enter__(self): pass def __exit__(self,",
"self._get_model_pk() if validated_data and isinstance(validated_data, dict) and pk in validated_data: validated_data_pk = validated_data.get(pk)"
] |
[
"b_mat = net.module.features[conv_ind].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat =",
"% (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 # Create model for evaluation#net =",
"batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs =",
"transform_test = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) if(args.data_path=='CIFAR100'): trainset =",
"== 'vgg'): w_mat = net.module.features[0].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[0].bias b_mat1 =",
"n in range(base_epochs[lr_ind]): print('\\nEpoch: {}'.format(epoch)) finetune(net) test(net) epoch += 1 lr_ind += 1",
"', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 # Create model",
"%.2f' % (L_diag.cpu()/L_angle.cpu()).item()) try: w_mat = module_id[b_id].shortcut[0].weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]):",
"parser.add_argument(\"--eval_ortho\", help=\"evaluate how orthogonal a model is\", default='False', choices=['True', 'False']) parser.add_argument(\"--finetune\", help=\"fine-tune a",
"angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block',",
"{:<8}'.format(flops)) # Compression Ratio def cal_compression_ratio(net_path, model): temp_path = \"./temp_models/\" base_model = create_model(name=model,",
"batch_size=128, shuffle=False, num_workers=2) # Testing def test(net): global best_acc net.eval() test_loss = 0",
"L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item())) for conv_ind in",
"elif(name == 'resnet-56'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned_cifar(cfg_p)) else:",
"inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss = criterion(outputs, targets) test_loss",
"os.mkdir(temp_path) state = {'net': base_model.state_dict()} torch.save(state, temp_path+'temp_base.pth') base_size = os.path.getsize(temp_path+'temp_base.pth') model_size = os.path.getsize(net_path)",
"== 'True'): acc = cal_acc(net, use_loader=testloader) print(\" Test accuracy: {:.2%}\".format(acc)) ######### Orthogonality evaluation",
"-*- import torch import torchvision from torchvision import datasets, models, transforms import numpy",
"len(base_sched)): optimizer.param_groups[0]['lr'] = base_sched[lr_ind] for n in range(base_epochs[lr_ind]): print('\\nEpoch: {}'.format(epoch)) finetune(net) test(net) epoch",
"= (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(params, torch.t(params)) L_diag =",
"if acc > best_acc: print(\"best accuracy:\", acc) best_acc = acc ######### Load network",
"cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned(cfg_p)) else: net = torch.nn.DataParallel(ResNet34()) elif(name == 'resnet-56'):",
"parser.add_argument(\"-m\", \"--model\", help=\"architecture model to be analyzed\", default='vgg', choices=['vgg', 'mobilenet', 'resnet', 'resnet-56']) parser.add_argument(\"--model_path\",",
"'True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) base_sched, base_epochs, wd =",
"0 correct = 0 total = 0 for batch_idx, (inputs, targets) in enumerate(trainloader):",
"% (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 elif(args.model == 'resnet-56'): num_blocks = [9,9,9]",
"= (angle_mat.norm(1)) print(' layer_'+str(mod_id) + ', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id",
"print(' layer_'+str(mod_id)+', block', str(b_id)+'_2: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) try: w_mat = module_id[b_id].shortcut[0].weight params =",
"torchvision.datasets.CIFAR100(root='./../data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR100(root='./../data',",
"# Accuracy def cal_acc(net, use_loader): net.eval() test_loss = 0 correct = 0 total",
"utf-8 -*- import torch import torchvision from torchvision import datasets, models, transforms import",
"= 0 total = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(use_loader):",
"+= predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total,",
"shutil.rmtree(temp_path) os.mkdir(temp_path) state = {'net': base_model.state_dict()} torch.save(state, temp_path+'temp_base.pth') base_size = os.path.getsize(temp_path+'temp_base.pth') model_size =",
"argparse.ArgumentParser() parser.add_argument(\"-m\", \"--model\", help=\"architecture model to be analyzed\", default='vgg', choices=['vgg', 'mobilenet', 'resnet', 'resnet-56'])",
"net = torch.nn.DataParallel(VGG_p(cfg_p)) else: net = torch.nn.DataParallel(VGG()) elif(name == 'mobilenet'): if(is_pruned == True):",
"pruned_epochs_iter, wd_iter best_acc = 0 lr_ind = 0 epoch = 0 optimizer =",
"model name ######### print((args.model).upper()) ######### Dataloader ######### if(args.train_acc == 'True' or args.test_acc ==",
"= (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item())) for conv_ind in [3, 7, 10, 14,",
"== 'True' or args.finetune == 'True'): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5),",
"torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_1: %.2f'",
"pruned model?\", default='False', choices=['True', 'False']) parser.add_argument(\"--train_acc\", help=\"evaluate train accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--test_acc\",",
"get_model_complexity_info import argparse ######### Parser ######### parser = argparse.ArgumentParser() parser.add_argument(\"-m\", \"--model\", help=\"architecture model",
"flops, params = get_model_complexity_info(net, (3, 32, 32), as_strings=False, print_per_layer_stat=False) print(' FLOPs: {:<8}'.format(flops)) #",
"net = torch.nn.DataParallel(ResPruned_cifar(cfg_p)) else: net = torch.nn.DataParallel(ResNet56()) return net ######### Print model name",
"Accuracy def cal_acc(net, use_loader): net.eval() test_loss = 0 correct = 0 total =",
"== 'True'): cal_flops(net) ######### Compression ratio evaluation ######### if(args.compression == 'True'): cal_compression_ratio(net_path=args.model_path, model=args.model)",
"# Orthogonality evaluator def eval_ortho(): if(args.model == 'vgg'): w_mat = net.module.features[0].weight w_mat1 =",
"str(b_id)+'_1: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) w_mat = module_id[b_id].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]):",
"compression ratio for model\", default='False', choices=['True', 'False']) parser.add_argument(\"--eval_ortho\", help=\"evaluate how orthogonal a model",
"except: pass mod_id += 1 # Create model for evaluation#net = torch.nn.DataParallel(VGG()) def",
"elif(name == 'resnet'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned(cfg_p)) else:",
"batch_size=128, shuffle=True, num_workers=2) testset = datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)",
"network or create new ######### if(args.train_acc == 'True' or args.test_acc == 'True' or",
"len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) acc",
"= 0 optimizer = optim.SGD(net.parameters(), lr=base_sched[lr_ind], momentum=0.9, weight_decay=wd) while(lr_ind < len(base_sched)): optimizer.param_groups[0]['lr'] =",
"optim.SGD(net.parameters(), lr=base_sched[lr_ind], momentum=0.9, weight_decay=wd) while(lr_ind < len(base_sched)): optimizer.param_groups[0]['lr'] = base_sched[lr_ind] for n in",
"= (angle_mat.norm(1)) print(\" Conv_base: {num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item())) for lnum in range(13): w_mat = net.module.layers[lnum].conv1.weight params",
"base_model.state_dict()} torch.save(state, temp_path+'temp_base.pth') base_size = os.path.getsize(temp_path+'temp_base.pth') model_size = os.path.getsize(net_path) print(\" Compression ratio: {:.3}\".format(base_size",
"= (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -pointwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'resnet'):",
"print_per_layer_stat=False) print(' FLOPs: {:<8}'.format(flops)) # Compression Ratio def cal_compression_ratio(net_path, model): temp_path = \"./temp_models/\"",
"models import * from pruner import * from config import * from ptflops",
"print(\" Test accuracy: {:.2%}\".format(acc)) ######### Orthogonality evaluation ######### if(args.eval_ortho == 'True'): eval_ortho() if(args.finetune",
"(w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" base",
"in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params =",
"epoch = 0 optimizer = optim.SGD(net.parameters(), lr=base_sched[lr_ind], momentum=0.9, weight_decay=wd) while(lr_ind < len(base_sched)): optimizer.param_groups[0]['lr']",
"= inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step()",
"shutil.rmtree(temp_path) # Fine-tune def finetune(net): net.train() train_loss = 0 correct = 0 total",
"net_dict['cfg'] net = torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:])) else: net = torch.nn.DataParallel(MobileNet()) elif(name == 'resnet'): if(is_pruned",
"accuracy evaluation ######### if(args.train_acc == 'True'): acc = cal_acc(net, use_loader=trainloader) print(\" Train accuracy:",
"torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_2: %.2f' %",
"L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -pointwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'resnet'): num_blocks =",
"transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) if(args.data_path=='CIFAR100'): trainset = torchvision.datasets.CIFAR100(root='./../data', train=True, download=True, transform=transform)",
"+= targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'",
"(angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_2: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) try: w_mat",
"if os.path.exists(temp_path): shutil.rmtree(temp_path) os.mkdir(temp_path) state = {'net': base_model.state_dict()} torch.save(state, temp_path+'temp_base.pth') base_size = os.path.getsize(temp_path+'temp_base.pth')",
"Train accuracy evaluation ######### if(args.train_acc == 'True'): acc = cal_acc(net, use_loader=trainloader) print(\" Train",
"% (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) acc = 100. * correct/total if acc >",
"choices=['True', 'False']) parser.add_argument(\"--eval_ortho\", help=\"evaluate how orthogonal a model is\", default='False', choices=['True', 'False']) parser.add_argument(\"--finetune\",",
"if(args.model == 'vgg'): w_mat = net.module.features[0].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[0].bias b_mat1",
"0 total = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs,",
"batch_idx, (inputs, targets) in enumerate(testloader): inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs)",
"weight_decay=wd) while(lr_ind < len(base_sched)): optimizer.param_groups[0]['lr'] = base_sched[lr_ind] for n in range(base_epochs[lr_ind]): print('\\nEpoch: {}'.format(epoch))",
"b_mat1), dim=1) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\"",
"%.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 # Create model for evaluation#net",
"name ######### print((args.model).upper()) ######### Dataloader ######### if(args.train_acc == 'True' or args.test_acc == 'True'",
"ratio: {:.3}\".format(base_size / model_size)) shutil.rmtree(temp_path) # Fine-tune def finetune(net): net.train() train_loss = 0",
"os.path.exists(temp_path): shutil.rmtree(temp_path) os.mkdir(temp_path) state = {'net': base_model.state_dict()} torch.save(state, temp_path+'temp_base.pth') base_size = os.path.getsize(temp_path+'temp_base.pth') model_size",
"torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_base: {num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item())) for lnum",
"progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))",
"criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct",
"choices=['True', 'False']) parser.add_argument(\"--train_acc\", help=\"evaluate train accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--test_acc\", help=\"evaluate test accuracy\",",
"torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" base layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id",
"help=\"evaluate train accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--test_acc\", help=\"evaluate test accuracy\", default='False', choices=['True', 'False'])",
"% (L_diag.cpu()/L_angle.cpu()).item()) try: w_mat = module_id[b_id].shortcut[0].weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params",
"(angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -pointwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'resnet'): num_blocks",
"model_size)) shutil.rmtree(temp_path) # Fine-tune def finetune(net): net.train() train_loss = 0 correct = 0",
"= cal_acc(net, use_loader=trainloader) print(\" Train accuracy: {:.2%}\".format(acc)) ######### Test accuracy evaluation ######### if(args.test_acc",
"wd = pruned_sched_iter, pruned_epochs_iter, wd_iter best_acc = 0 lr_ind = 0 epoch =",
"model\", default='False', choices=['True', 'False']) args = parser.parse_args() device = 'cuda' if torch.cuda.is_available() else",
"= torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" base layer:\", (L_diag.cpu()/L_angle.cpu()).item())",
"= module_id[b_id].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t() angle_mat =",
"inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, targets)",
"(angle_mat.norm(1)) print(\" Conv_{ind} -depthwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) w_mat = net.module.layers[lnum].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat",
"with torch.cuda.device(0): flops, params = get_model_complexity_info(net, (3, 32, 32), as_strings=False, print_per_layer_stat=False) print(' FLOPs:",
"######### Test accuracy evaluation ######### if(args.test_acc == 'True'): acc = cal_acc(net, use_loader=testloader) print(\"",
"for model\", default='False', choices=['True', 'False']) parser.add_argument(\"--eval_ortho\", help=\"evaluate how orthogonal a model is\", default='False',",
"in a model\", default='False', choices=['True', 'False']) parser.add_argument(\"--compression\", help=\"calculate compression ratio for model\", default='False',",
"import torch.optim as optim import os import shutil from models import * from",
"= (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\"",
"'True' or args.test_acc == 'True' or args.finetune == 'True'): transform = transforms.Compose( [transforms.ToTensor(),",
"== True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:])) else: net = torch.nn.DataParallel(MobileNet())",
"analyzed is stored\", default='0') parser.add_argument(\"--data_path\", help=\"path to dataset\", default='CIFAR100') parser.add_argument(\"--pruned\", help=\"is the model",
"eval_ortho(): if(args.model == 'vgg'): w_mat = net.module.features[0].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[0].bias",
"else: net = torch.nn.DataParallel(MobileNet()) elif(name == 'resnet'): if(is_pruned == True): cfg_p = net_dict['cfg']",
"orthogonal a model is\", default='False', choices=['True', 'False']) parser.add_argument(\"--finetune\", help=\"fine-tune a model\", default='False', choices=['True',",
"in [net.module.layer1, net.module.layer2, net.module.layer3]: for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params =",
"np import torch.optim as optim import os import shutil from models import *",
"datasets, models, transforms import numpy as np import torch.optim as optim import os",
"Test accuracy: {:.2%}\".format(acc)) ######### Orthogonality evaluation ######### if(args.eval_ortho == 'True'): eval_ortho() if(args.finetune ==",
"'vgg'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(VGG_p(cfg_p)) else: net =",
"params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -depthwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) w_mat",
"'resnet'): num_blocks = [3,4,6,3] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params),",
"= module_id[b_id].shortcut[0].weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t() angle_mat =",
"True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned_cifar(cfg_p)) else: net = torch.nn.DataParallel(ResNet56()) return net",
"0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(use_loader): inputs, targets = inputs.to(device),",
"loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader),",
"parser = argparse.ArgumentParser() parser.add_argument(\"-m\", \"--model\", help=\"architecture model to be analyzed\", default='vgg', choices=['vgg', 'mobilenet',",
"[net.module.layer1, net.module.layer2, net.module.layer3]: for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1))",
"torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) # Testing def test(net): global best_acc net.eval() test_loss =",
"Conv_{ind}: {num:.2}\".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item())) for conv_ind in [3, 7, 10, 14, 17, 21, 24,",
"= torch.nn.DataParallel(ResNet56()) return net ######### Print model name ######### print((args.model).upper()) ######### Dataloader #########",
"> best_acc: print(\"best accuracy:\", acc) best_acc = acc ######### Load network or create",
"from pruner import * from config import * from ptflops import get_model_complexity_info import",
"torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) base_sched, base_epochs, wd = pruned_sched_iter, pruned_epochs_iter, wd_iter",
"(inputs, targets) in enumerate(use_loader): inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss",
"True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned(cfg_p)) else: net = torch.nn.DataParallel(ResNet34()) elif(name ==",
"accuracy: {:.2%}\".format(acc)) ######### Test accuracy evaluation ######### if(args.test_acc == 'True'): acc = cal_acc(net,",
"if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:])) else: net =",
"angle_mat = torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -pointwise:",
"use_loader=testloader) print(\" Test accuracy: {:.2%}\".format(acc)) ######### Orthogonality evaluation ######### if(args.eval_ortho == 'True'): eval_ortho()",
"if(params.shape[1] < params.shape[0]): params = params.t() angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1))",
"coding: utf-8 -*- import torch import torchvision from torchvision import datasets, models, transforms",
"targets) in enumerate(use_loader): inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss =",
"model_size = os.path.getsize(net_path) print(\" Compression ratio: {:.3}\".format(base_size / model_size)) shutil.rmtree(temp_path) # Fine-tune def",
"######### Train accuracy evaluation ######### if(args.train_acc == 'True'): acc = cal_acc(net, use_loader=trainloader) print(\"",
"params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -pointwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model",
"== 'resnet-56'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned_cifar(cfg_p)) else: net",
"######### if(args.train_acc == 'True' or args.test_acc == 'True' or args.flops=='True' or args.eval_ortho=='True'): net_dict",
"= {'net': base_model.state_dict()} torch.save(state, temp_path+'temp_base.pth') base_size = os.path.getsize(temp_path+'temp_base.pth') model_size = os.path.getsize(net_path) print(\" Compression",
"evaluation ######### if(args.flops == 'True'): cal_flops(net) ######### Compression ratio evaluation ######### if(args.compression ==",
"num_workers=2) # Testing def test(net): global best_acc net.eval() test_loss = 0 correct =",
"temp_path+'temp_base.pth') base_size = os.path.getsize(temp_path+'temp_base.pth') model_size = os.path.getsize(net_path) print(\" Compression ratio: {:.3}\".format(base_size / model_size))",
"criterion(outputs, targets) loss.backward() optimizer.step() train_loss += loss.item() _, predicted = outputs.max(1) total +=",
"+ ', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 elif(args.model ==",
"print(\" Train accuracy: {:.2%}\".format(acc)) ######### Test accuracy evaluation ######### if(args.test_acc == 'True'): acc",
"correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1),",
"params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t() angle_mat = torch.matmul(params, torch.t(params))",
"trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test) testloader = torch.utils.data.DataLoader(testset,",
"= (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -depthwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) w_mat = net.module.layers[lnum].conv2.weight",
"net.module.layer4]: for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] <",
"targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss = criterion(outputs, targets) test_loss +=",
"args.test_acc == 'True' or args.finetune == 'True'): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5,",
"0 total = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(use_loader): inputs,",
"= torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128,",
"(L_diag.cpu()/L_angle.cpu()).item()) w_mat = module_id[b_id].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t()",
"test_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item()",
"0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs, targets = inputs.to(device),",
"if(args.test_acc == 'True'): acc = cal_acc(net, use_loader=testloader) print(\" Test accuracy: {:.2%}\".format(acc)) ######### Orthogonality",
"parser.add_argument(\"--train_acc\", help=\"evaluate train accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--test_acc\", help=\"evaluate test accuracy\", default='False', choices=['True',",
"torch.cuda.device(0): flops, params = get_model_complexity_info(net, (3, 32, 32), as_strings=False, print_per_layer_stat=False) print(' FLOPs: {:<8}'.format(flops))",
"######### parser = argparse.ArgumentParser() parser.add_argument(\"-m\", \"--model\", help=\"architecture model to be analyzed\", default='vgg', choices=['vgg',",
"######### FLOPs evaluation ######### if(args.flops == 'True'): cal_flops(net) ######### Compression ratio evaluation #########",
"conv_ind in [3, 7, 10, 14, 17, 21, 24, 28, 31]: w_mat =",
"= get_model_complexity_info(net, (3, 32, 32), as_strings=False, print_per_layer_stat=False) print(' FLOPs: {:<8}'.format(flops)) # Compression Ratio",
"(angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id) + ', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except:",
"= base_sched[lr_ind] for n in range(base_epochs[lr_ind]): print('\\nEpoch: {}'.format(epoch)) finetune(net) test(net) epoch += 1",
"100.*correct/total, correct, total)) acc = 100. * correct/total if acc > best_acc: print(\"best",
"L_angle = (angle_mat.norm(1)) print(\" Conv_base: {num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item())) for lnum in range(13): w_mat = net.module.layers[lnum].conv1.weight",
"net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) ######### FLOPs evaluation ######### if(args.flops == 'True'): cal_flops(net)",
"0 optimizer = optim.SGD(net.parameters(), lr=base_sched[lr_ind], momentum=0.9, weight_decay=wd) while(lr_ind < len(base_sched)): optimizer.param_groups[0]['lr'] = base_sched[lr_ind]",
"{num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'resnet'): num_blocks = [3,4,6,3] w_mat = net.module.conv1.weight params =",
"0.5))]) transform_test = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) if(args.data_path=='CIFAR100'): trainset",
"argparse ######### Parser ######### parser = argparse.ArgumentParser() parser.add_argument(\"-m\", \"--model\", help=\"architecture model to be",
"choices=['vgg', 'mobilenet', 'resnet', 'resnet-56']) parser.add_argument(\"--model_path\", help=\"path where the model to be analyzed is",
"net.module.features[conv_ind].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(params, torch.t(params))",
"= criterion(outputs, targets) loss.backward() optimizer.step() train_loss += loss.item() _, predicted = outputs.max(1) total",
"cfg_p[1:])) else: net = torch.nn.DataParallel(MobileNet()) elif(name == 'resnet'): if(is_pruned == True): cfg_p =",
"* from ptflops import get_model_complexity_info import argparse ######### Parser ######### parser = argparse.ArgumentParser()",
"accuracy evaluation ######### if(args.test_acc == 'True'): acc = cal_acc(net, use_loader=testloader) print(\" Test accuracy:",
"'resnet'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned(cfg_p)) else: net =",
"evaluate different properties ######### # Accuracy def cal_acc(net, use_loader): net.eval() test_loss = 0",
"Ratio def cal_compression_ratio(net_path, model): temp_path = \"./temp_models/\" base_model = create_model(name=model, is_pruned=False) if os.path.exists(temp_path):",
"wd_iter best_acc = 0 lr_ind = 0 epoch = 0 optimizer = optim.SGD(net.parameters(),",
"total = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(use_loader): inputs, targets",
"for evaluation#net = torch.nn.DataParallel(VGG()) def create_model(name, is_pruned): if(name == 'vgg'): if(is_pruned == True):",
"torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) ######### FLOPs evaluation ######### if(args.flops == 'True'):",
"w_mat = module_id[b_id].shortcut[0].weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t() angle_mat",
"17, 21, 24, 28, 31]: w_mat = net.module.features[conv_ind].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat =",
"def finetune(net): net.train() train_loss = 0 correct = 0 total = 0 for",
"targets.to(device) outputs = net(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _, predicted",
"= 0 for batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device)",
"= module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t() angle_mat =",
"parser.add_argument(\"--model_path\", help=\"path where the model to be analyzed is stored\", default='0') parser.add_argument(\"--data_path\", help=\"path",
"pruned_sched_iter, pruned_epochs_iter, wd_iter best_acc = 0 lr_ind = 0 epoch = 0 optimizer",
"train_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item()",
"cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:])) else: net = torch.nn.DataParallel(MobileNet()) elif(name ==",
"import get_model_complexity_info import argparse ######### Parser ######### parser = argparse.ArgumentParser() parser.add_argument(\"-m\", \"--model\", help=\"architecture",
"torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False,",
"models, transforms import numpy as np import torch.optim as optim import os import",
"'False']) parser.add_argument(\"--train_acc\", help=\"evaluate train accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--test_acc\", help=\"evaluate test accuracy\", default='False',",
"num_blocks = [3,4,6,3] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params)",
"targets.to(device) optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() train_loss +=",
"= torchvision.datasets.CIFAR100(root='./../data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) else: trainset",
"-depthwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) w_mat = net.module.layers[lnum].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params)",
"print(\" Compression ratio: {:.3}\".format(base_size / model_size)) shutil.rmtree(temp_path) # Fine-tune def finetune(net): net.train() train_loss",
"batch_size=128, shuffle=False, num_workers=2) else: trainset = datasets.ImageFolder(root=args.data_path+'/train', transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True,",
"= torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:])) else: net = torch.nn.DataParallel(MobileNet()) elif(name == 'resnet'): if(is_pruned == True):",
"to be analyzed\", default='vgg', choices=['vgg', 'mobilenet', 'resnet', 'resnet-56']) parser.add_argument(\"--model_path\", help=\"path where the model",
"0.5, 0.5))]) transform_test = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) if(args.data_path=='CIFAR100'):",
"pruner import * from config import * from ptflops import get_model_complexity_info import argparse",
"= pruned_sched_iter, pruned_epochs_iter, wd_iter best_acc = 0 lr_ind = 0 epoch = 0",
"= (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_base: {num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item())) for lnum in range(13): w_mat",
"import numpy as np import torch.optim as optim import os import shutil from",
"acc ######### Load network or create new ######### if(args.train_acc == 'True' or args.test_acc",
"(%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) # Orthogonality evaluator def eval_ortho(): if(args.model ==",
"'True'): acc = cal_acc(net, use_loader=testloader) print(\" Test accuracy: {:.2%}\".format(acc)) ######### Orthogonality evaluation #########",
"'True' or args.flops=='True' or args.eval_ortho=='True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net'])",
"= 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs, targets =",
"in enumerate(use_loader): inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss = criterion(outputs,",
"= (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\"",
"net = torch.nn.DataParallel(VGG()) elif(name == 'mobilenet'): if(is_pruned == True): cfg_p = net_dict['cfg'] net",
"or args.test_acc == 'True' or args.finetune == 'True'): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5,",
"import * from ptflops import get_model_complexity_info import argparse ######### Parser ######### parser =",
"(L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 # Create model for evaluation#net = torch.nn.DataParallel(VGG())",
"0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3]: for b_id in range(num_blocks[mod_id]): w_mat =",
"= torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_1:",
"torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs, targets = inputs.to(device), targets.to(device) outputs",
"base_epochs, wd = pruned_sched_iter, pruned_epochs_iter, wd_iter best_acc = 0 lr_ind = 0 epoch",
"evaluation ######### if(args.train_acc == 'True'): acc = cal_acc(net, use_loader=trainloader) print(\" Train accuracy: {:.2%}\".format(acc))",
"from ptflops import get_model_complexity_info import argparse ######### Parser ######### parser = argparse.ArgumentParser() parser.add_argument(\"-m\",",
"<reponame>EkdeepSLubana/OrthoReg # -*- coding: utf-8 -*- import torch import torchvision from torchvision import",
"block', str(b_id)+'_1: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) w_mat = module_id[b_id].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] <",
"outputs = net(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _, predicted =",
"print(\" Conv_{ind}: {num:.2}\".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'mobilenet'): w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1))",
"else: net = torch.nn.DataParallel(VGG()) elif(name == 'mobilenet'): if(is_pruned == True): cfg_p = net_dict['cfg']",
"temp_path = \"./temp_models/\" base_model = create_model(name=model, is_pruned=False) if os.path.exists(temp_path): shutil.rmtree(temp_path) os.mkdir(temp_path) state =",
"== 'mobilenet'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:])) else:",
"get_model_complexity_info(net, (3, 32, 32), as_strings=False, print_per_layer_stat=False) print(' FLOPs: {:<8}'.format(flops)) # Compression Ratio def",
"w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1))",
"predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() return correct / total",
"(inputs, targets) in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = net(inputs)",
"FLOPs evaluation ######### if(args.flops == 'True'): cal_flops(net) ######### Compression ratio evaluation ######### if(args.compression",
"L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_1: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) w_mat = module_id[b_id].conv2.weight",
"choices=['True', 'False']) parser.add_argument(\"--test_acc\", help=\"evaluate test accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--flops\", help=\"calculate flops in",
"\"./temp_models/\" base_model = create_model(name=model, is_pruned=False) if os.path.exists(temp_path): shutil.rmtree(temp_path) os.mkdir(temp_path) state = {'net': base_model.state_dict()}",
"= 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(use_loader): inputs, targets =",
"evaluation#net = torch.nn.DataParallel(VGG()) def create_model(name, is_pruned): if(name == 'vgg'): if(is_pruned == True): cfg_p",
"pass mod_id += 1 # Create model for evaluation#net = torch.nn.DataParallel(VGG()) def create_model(name,",
"(angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_2: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) try: w_mat = module_id[b_id].shortcut[0].weight params",
"help=\"evaluate test accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--flops\", help=\"calculate flops in a model\", default='False',",
"Dataloader ######### if(args.train_acc == 'True' or args.test_acc == 'True' or args.finetune == 'True'):",
"trainset = datasets.ImageFolder(root=args.data_path+'/train', transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = datasets.ImageFolder(root=args.data_path+'/test',",
"L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item())) for conv_ind in [3, 7, 10,",
"= torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) ######### FLOPs evaluation ######### if(args.flops ==",
"print(\" base layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3,",
"be analyzed\", default='vgg', choices=['vgg', 'mobilenet', 'resnet', 'resnet-56']) parser.add_argument(\"--model_path\", help=\"path where the model to",
"net.module.layers[lnum].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle =",
"pass mod_id += 1 elif(args.model == 'resnet-56'): num_blocks = [9,9,9] w_mat = net.module.conv1.weight",
"num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'mobilenet'): w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params),",
"state = {'net': base_model.state_dict()} torch.save(state, temp_path+'temp_base.pth') base_size = os.path.getsize(temp_path+'temp_base.pth') model_size = os.path.getsize(net_path) print(\"",
"Compression ratio evaluation ######### if(args.compression == 'True'): cal_compression_ratio(net_path=args.model_path, model=args.model) ######### Train accuracy evaluation",
"where the model to be analyzed is stored\", default='0') parser.add_argument(\"--data_path\", help=\"path to dataset\",",
"(w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t() angle_mat = torch.matmul(params, torch.t(params)) L_diag =",
"32), as_strings=False, print_per_layer_stat=False) print(' FLOPs: {:<8}'.format(flops)) # Compression Ratio def cal_compression_ratio(net_path, model): temp_path",
"args.eval_ortho=='True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) ######### FLOPs evaluation #########",
"if(args.compression == 'True'): cal_compression_ratio(net_path=args.model_path, model=args.model) ######### Train accuracy evaluation ######### if(args.train_acc == 'True'):",
"module_id in [net.module.layer1, net.module.layer2, net.module.layer3]: for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params",
"ptflops import get_model_complexity_info import argparse ######### Parser ######### parser = argparse.ArgumentParser() parser.add_argument(\"-m\", \"--model\",",
"module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t() angle_mat = torch.matmul(params,",
"shuffle=False, num_workers=2) else: trainset = datasets.ImageFolder(root=args.data_path+'/train', transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)",
"choices=['True', 'False']) parser.add_argument(\"--flops\", help=\"calculate flops in a model\", default='False', choices=['True', 'False']) parser.add_argument(\"--compression\", help=\"calculate",
"predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader), 'Loss: %.3f",
"{num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item())) for lnum in range(13): w_mat = net.module.layers[lnum].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat =",
"L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_1: %.2f' % (L_diag.cpu()/L_angle.cpu()).item())",
"(test_loss/(batch_idx+1), 100.*correct/total, correct, total)) acc = 100. * correct/total if acc > best_acc:",
"momentum=0.9, weight_decay=wd) while(lr_ind < len(base_sched)): optimizer.param_groups[0]['lr'] = base_sched[lr_ind] for n in range(base_epochs[lr_ind]): print('\\nEpoch:",
"try: w_mat = module_id[b_id].shortcut[0].weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t()",
"correct/total if acc > best_acc: print(\"best accuracy:\", acc) best_acc = acc ######### Load",
"for lnum in range(13): w_mat = net.module.layers[lnum].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(),",
"(angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_base: {num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item())) for lnum in range(13): w_mat =",
"device = 'cuda' if torch.cuda.is_available() else 'cpu' criterion = nn.CrossEntropyLoss() ######### Functions to",
"= torchvision.datasets.CIFAR100(root='./../data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset =",
"L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" base layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0",
"= net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle",
"'False']) args = parser.parse_args() device = 'cuda' if torch.cuda.is_available() else 'cpu' criterion =",
"torch.nn.DataParallel(VGG_p(cfg_p)) else: net = torch.nn.DataParallel(VGG()) elif(name == 'mobilenet'): if(is_pruned == True): cfg_p =",
"trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR100(root='./../data', train=False, download=True, transform=transform_test) testloader",
"######### Print model name ######### print((args.model).upper()) ######### Dataloader ######### if(args.train_acc == 'True' or",
"7, 10, 14, 17, 21, 24, 28, 31]: w_mat = net.module.features[conv_ind].weight w_mat1 =",
"block', str(b_id)+'_2: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) try: w_mat = module_id[b_id].shortcut[0].weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1]",
"= net.module.layers[lnum].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle",
"model to be analyzed a pruned model?\", default='False', choices=['True', 'False']) parser.add_argument(\"--train_acc\", help=\"evaluate train",
"(3, 32, 32), as_strings=False, print_per_layer_stat=False) print(' FLOPs: {:<8}'.format(flops)) # Compression Ratio def cal_compression_ratio(net_path,",
"model?\", default='False', choices=['True', 'False']) parser.add_argument(\"--train_acc\", help=\"evaluate train accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--test_acc\", help=\"evaluate",
"torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) else: trainset = datasets.ImageFolder(root=args.data_path+'/train', transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,",
"datasets.ImageFolder(root=args.data_path+'/train', transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test) testloader",
"acc = cal_acc(net, use_loader=testloader) print(\" Test accuracy: {:.2%}\".format(acc)) ######### Orthogonality evaluation ######### if(args.eval_ortho",
"print(\" Conv_{ind} -pointwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'resnet'): num_blocks = [3,4,6,3] w_mat =",
"%.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) acc = 100.",
"= 0 total = 0 for batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets",
"acc > best_acc: print(\"best accuracy:\", acc) best_acc = acc ######### Load network or",
"(b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1))",
"== 'True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) base_sched, base_epochs, wd",
"in range(13): w_mat = net.module.layers[lnum].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag",
"os.path.getsize(temp_path+'temp_base.pth') model_size = os.path.getsize(net_path) print(\" Compression ratio: {:.3}\".format(base_size / model_size)) shutil.rmtree(temp_path) # Fine-tune",
"0 total = 0 for batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets =",
"== 'True'): eval_ortho() if(args.finetune == 'True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True'))",
"targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %",
"* from config import * from ptflops import get_model_complexity_info import argparse ######### Parser",
"% (L_diag.cpu()/L_angle.cpu()).item()) w_mat = module_id[b_id].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params =",
"net(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() train_loss += loss.item() _, predicted =",
"cal_flops(net): with torch.cuda.device(0): flops, params = get_model_complexity_info(net, (3, 32, 32), as_strings=False, print_per_layer_stat=False) print('",
"= net.module.features[0].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(torch.t(params),",
"for batch_idx, (inputs, targets) in enumerate(testloader): inputs, targets = inputs.to(device), targets.to(device) outputs =",
"net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) base_sched, base_epochs, wd = pruned_sched_iter, pruned_epochs_iter, wd_iter best_acc",
"parser.add_argument(\"--test_acc\", help=\"evaluate test accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--flops\", help=\"calculate flops in a model\",",
"= net(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1)",
"w_mat = net.module.layers[lnum].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1))",
"predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader), 'Loss: %.3f",
"outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() return correct / total # FLOPs",
"== True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(VGG_p(cfg_p)) else: net = torch.nn.DataParallel(VGG()) elif(name",
"= net_dict['cfg'] net = torch.nn.DataParallel(VGG_p(cfg_p)) else: net = torch.nn.DataParallel(VGG()) elif(name == 'mobilenet'): if(is_pruned",
"accuracy: {:.2%}\".format(acc)) ######### Orthogonality evaluation ######### if(args.eval_ortho == 'True'): eval_ortho() if(args.finetune == 'True'):",
"angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=0,",
"default='vgg', choices=['vgg', 'mobilenet', 'resnet', 'resnet-56']) parser.add_argument(\"--model_path\", help=\"path where the model to be analyzed",
"net = torch.nn.DataParallel(ResNet56()) return net ######### Print model name ######### print((args.model).upper()) ######### Dataloader",
"enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs,",
"= torch.nn.DataParallel(VGG()) elif(name == 'mobilenet'): if(is_pruned == True): cfg_p = net_dict['cfg'] net =",
"print(\" base layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3]:",
"else 'cpu' criterion = nn.CrossEntropyLoss() ######### Functions to evaluate different properties ######### #",
"= outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() return correct / total #",
"== 'vgg'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(VGG_p(cfg_p)) else: net",
"acc) best_acc = acc ######### Load network or create new ######### if(args.train_acc ==",
"mod_id += 1 elif(args.model == 'resnet-56'): num_blocks = [9,9,9] w_mat = net.module.conv1.weight params",
"= torch.nn.DataParallel(MobileNet()) elif(name == 'resnet'): if(is_pruned == True): cfg_p = net_dict['cfg'] net =",
"default='False', choices=['True', 'False']) args = parser.parse_args() device = 'cuda' if torch.cuda.is_available() else 'cpu'",
"is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) ######### FLOPs evaluation ######### if(args.flops == 'True'): cal_flops(net) ######### Compression ratio",
"= nn.CrossEntropyLoss() ######### Functions to evaluate different properties ######### # Accuracy def cal_acc(net,",
"= net_dict['cfg'] net = torch.nn.DataParallel(ResPruned_cifar(cfg_p)) else: net = torch.nn.DataParallel(ResNet56()) return net ######### Print",
"######### if(args.test_acc == 'True'): acc = cal_acc(net, use_loader=testloader) print(\" Test accuracy: {:.2%}\".format(acc)) #########",
"test(net): global best_acc net.eval() test_loss = 0 correct = 0 total = 0",
"eval_ortho() if(args.finetune == 'True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) base_sched,",
"== 'resnet'): num_blocks = [3,4,6,3] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat =",
"outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc:",
"100.*correct/total, correct, total)) # Orthogonality evaluator def eval_ortho(): if(args.model == 'vgg'): w_mat =",
"= net(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() train_loss += loss.item() _, predicted",
"params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle",
"cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(VGG_p(cfg_p)) else: net = torch.nn.DataParallel(VGG()) elif(name == 'mobilenet'):",
"= torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR100(root='./../data', train=False, download=True, transform=transform_test) testloader =",
"= net.module.layers[lnum].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle",
"params.t() angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+',",
"'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) # Orthogonality",
"= net.module.features[conv_ind].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[conv_ind].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params =",
"= [9,9,9] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag",
"else: net = torch.nn.DataParallel(ResNet34()) elif(name == 'resnet-56'): if(is_pruned == True): cfg_p = net_dict['cfg']",
"(0.5, 0.5, 0.5))]) transform_test = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])",
"acc = cal_acc(net, use_loader=trainloader) print(\" Train accuracy: {:.2%}\".format(acc)) ######### Test accuracy evaluation #########",
"be analyzed a pruned model?\", default='False', choices=['True', 'False']) parser.add_argument(\"--train_acc\", help=\"evaluate train accuracy\", default='False',",
"net.train() train_loss = 0 correct = 0 total = 0 for batch_idx, (inputs,",
"0 correct = 0 total = 0 with torch.no_grad(): for batch_idx, (inputs, targets)",
"os import shutil from models import * from pruner import * from config",
"analyzed\", default='vgg', choices=['vgg', 'mobilenet', 'resnet', 'resnet-56']) parser.add_argument(\"--model_path\", help=\"path where the model to be",
"= torch.nn.DataParallel(VGG()) def create_model(name, is_pruned): if(name == 'vgg'): if(is_pruned == True): cfg_p =",
"use_loader): net.eval() test_loss = 0 correct = 0 total = 0 with torch.no_grad():",
"0 lr_ind = 0 epoch = 0 optimizer = optim.SGD(net.parameters(), lr=base_sched[lr_ind], momentum=0.9, weight_decay=wd)",
"help=\"architecture model to be analyzed\", default='vgg', choices=['vgg', 'mobilenet', 'resnet', 'resnet-56']) parser.add_argument(\"--model_path\", help=\"path where",
"== 'True' or args.test_acc == 'True' or args.finetune == 'True'): transform = transforms.Compose(",
"base_size = os.path.getsize(temp_path+'temp_base.pth') model_size = os.path.getsize(net_path) print(\" Compression ratio: {:.3}\".format(base_size / model_size)) shutil.rmtree(temp_path)",
"(angle_mat.norm(1)) print(' layer_'+str(mod_id) + ', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id +=",
"parser.parse_args() device = 'cuda' if torch.cuda.is_available() else 'cpu' criterion = nn.CrossEntropyLoss() ######### Functions",
"mod_id += 1 # Create model for evaluation#net = torch.nn.DataParallel(VGG()) def create_model(name, is_pruned):",
"parser.add_argument(\"--pruned\", help=\"is the model to be analyzed a pruned model?\", default='False', choices=['True', 'False'])",
"= (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'mobilenet'): w_mat = net.module.conv1.weight params",
"mod_id = 0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3]: for b_id in range(num_blocks[mod_id]):",
"== 'True' or args.flops=='True' or args.eval_ortho=='True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True'))",
"< params.shape[0]): params = params.t() angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle",
"(w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}",
"trainset = torchvision.datasets.CIFAR100(root='./../data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset",
"Print model name ######### print((args.model).upper()) ######### Dataloader ######### if(args.train_acc == 'True' or args.test_acc",
"targets) in enumerate(testloader): inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss =",
"######### Parser ######### parser = argparse.ArgumentParser() parser.add_argument(\"-m\", \"--model\", help=\"architecture model to be analyzed\",",
"'vgg'): w_mat = net.module.features[0].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[0].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1))",
"numpy as np import torch.optim as optim import os import shutil from models",
"net(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1) total",
"'False']) parser.add_argument(\"--test_acc\", help=\"evaluate test accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--flops\", help=\"calculate flops in a",
"= torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item()))",
"def create_model(name, is_pruned): if(name == 'vgg'): if(is_pruned == True): cfg_p = net_dict['cfg'] net",
"_, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader), 'Loss:",
"or args.eval_ortho=='True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) ######### FLOPs evaluation",
"print(\" Conv_{ind}: {num:.2}\".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item())) for conv_ind in [3, 7, 10, 14, 17, 21,",
"model=args.model) ######### Train accuracy evaluation ######### if(args.train_acc == 'True'): acc = cal_acc(net, use_loader=trainloader)",
"base_sched[lr_ind] for n in range(base_epochs[lr_ind]): print('\\nEpoch: {}'.format(epoch)) finetune(net) test(net) epoch += 1 lr_ind",
"layer_'+str(mod_id) + ', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 elif(args.model",
"+= loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx,",
"= (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_2: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) try: w_mat = module_id[b_id].shortcut[0].weight",
"targets.size(0) correct += predicted.eq(targets).sum().item() return correct / total # FLOPs def cal_flops(net): with",
"train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR100(root='./../data', train=False,",
"= torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -pointwise: {num:.2}\".format(ind=lnum,",
"cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned_cifar(cfg_p)) else: net = torch.nn.DataParallel(ResNet56()) return net #########",
"net.module.features[conv_ind].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[conv_ind].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1,",
"= params.t() angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print('",
"shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 # Create model for",
"net.module.features[0].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(torch.t(params), params)",
"torchvision.datasets.CIFAR100(root='./../data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) else: trainset =",
"download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) else: trainset = datasets.ImageFolder(root=args.data_path+'/train', transform=transform)",
"= (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t() angle_mat = torch.matmul(params, torch.t(params)) L_diag",
"'resnet-56']) parser.add_argument(\"--model_path\", help=\"path where the model to be analyzed is stored\", default='0') parser.add_argument(\"--data_path\",",
"the model to be analyzed a pruned model?\", default='False', choices=['True', 'False']) parser.add_argument(\"--train_acc\", help=\"evaluate",
"testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) # Testing def test(net): global best_acc net.eval()",
"base layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3, net.module.layer4]:",
"%.2f' % (L_diag.cpu()/L_angle.cpu()).item()) w_mat = module_id[b_id].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params",
"a model\", default='False', choices=['True', 'False']) args = parser.parse_args() device = 'cuda' if torch.cuda.is_available()",
"_, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() return correct /",
"######### # Accuracy def cal_acc(net, use_loader): net.eval() test_loss = 0 correct = 0",
"######### if(args.train_acc == 'True'): acc = cal_acc(net, use_loader=trainloader) print(\" Train accuracy: {:.2%}\".format(acc)) #########",
"global best_acc net.eval() test_loss = 0 correct = 0 total = 0 with",
"b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(torch.t(params), params) L_diag",
"= 0 correct = 0 total = 0 for batch_idx, (inputs, targets) in",
"= (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_1: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) w_mat",
"predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct,",
"torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model",
"model to be analyzed is stored\", default='0') parser.add_argument(\"--data_path\", help=\"path to dataset\", default='CIFAR100') parser.add_argument(\"--pruned\",",
"enumerate(testloader): inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss = criterion(outputs, targets)",
"or create new ######### if(args.train_acc == 'True' or args.test_acc == 'True' or args.flops=='True'",
"= 0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3, net.module.layer4]: for b_id in range(num_blocks[mod_id]):",
"shuffle=False, num_workers=2) # Testing def test(net): global best_acc net.eval() test_loss = 0 correct",
"+= targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'",
"%.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) acc = 100. * correct/total if",
"accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--test_acc\", help=\"evaluate test accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--flops\", help=\"calculate",
"######### Orthogonality evaluation ######### if(args.eval_ortho == 'True'): eval_ortho() if(args.finetune == 'True'): net_dict =",
"evaluation ######### if(args.test_acc == 'True'): acc = cal_acc(net, use_loader=testloader) print(\" Test accuracy: {:.2%}\".format(acc))",
"elif(args.model == 'resnet-56'): num_blocks = [9,9,9] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat",
"to be analyzed is stored\", default='0') parser.add_argument(\"--data_path\", help=\"path to dataset\", default='CIFAR100') parser.add_argument(\"--pruned\", help=\"is",
"model is\", default='False', choices=['True', 'False']) parser.add_argument(\"--finetune\", help=\"fine-tune a model\", default='False', choices=['True', 'False']) args",
"loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() return correct",
"== 'True' or args.test_acc == 'True' or args.flops=='True' or args.eval_ortho=='True'): net_dict = torch.load(args.model_path)",
"elif(name == 'mobilenet'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:]))",
"Conv_{ind}: {num:.2}\".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'mobilenet'): w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat",
"= (angle_mat.norm(1)) print(\" Conv_{ind} -depthwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) w_mat = net.module.layers[lnum].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1))",
"torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR100(root='./../data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset,",
"32, 32), as_strings=False, print_per_layer_stat=False) print(' FLOPs: {:<8}'.format(flops)) # Compression Ratio def cal_compression_ratio(net_path, model):",
"= datasets.ImageFolder(root=args.data_path+'/train', transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test)",
"else: trainset = datasets.ImageFolder(root=args.data_path+'/train', transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset =",
"base_model = create_model(name=model, is_pruned=False) if os.path.exists(temp_path): shutil.rmtree(temp_path) os.mkdir(temp_path) state = {'net': base_model.state_dict()} torch.save(state,",
"transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test) testloader =",
"== 'True'): cal_compression_ratio(net_path=args.model_path, model=args.model) ######### Train accuracy evaluation ######### if(args.train_acc == 'True'): acc",
"num=(L_diag.cpu()/L_angle.cpu()).item())) w_mat = net.module.layers[lnum].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag =",
"-pointwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'resnet'): num_blocks = [3,4,6,3] w_mat = net.module.conv1.weight params",
"(angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'mobilenet'): w_mat = net.module.conv1.weight params =",
"import os import shutil from models import * from pruner import * from",
"a pruned model?\", default='False', choices=['True', 'False']) parser.add_argument(\"--train_acc\", help=\"evaluate train accuracy\", default='False', choices=['True', 'False'])",
"# FLOPs def cal_flops(net): with torch.cuda.device(0): flops, params = get_model_complexity_info(net, (3, 32, 32),",
"= \"./temp_models/\" base_model = create_model(name=model, is_pruned=False) if os.path.exists(temp_path): shutil.rmtree(temp_path) os.mkdir(temp_path) state = {'net':",
"'True'): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) transform_test =",
"transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) if(args.data_path=='CIFAR100'): trainset = torchvision.datasets.CIFAR100(root='./../data', train=True,",
"for module_id in [net.module.layer1, net.module.layer2, net.module.layer3, net.module.layer4]: for b_id in range(num_blocks[mod_id]): w_mat =",
"(angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item())) for conv_ind in [3, 7, 10, 14, 17,",
"best_acc: print(\"best accuracy:\", acc) best_acc = acc ######### Load network or create new",
"finetune(net): net.train() train_loss = 0 correct = 0 total = 0 for batch_idx,",
"targets) loss.backward() optimizer.step() train_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0)",
"import torchvision from torchvision import datasets, models, transforms import numpy as np import",
"stored\", default='0') parser.add_argument(\"--data_path\", help=\"path to dataset\", default='CIFAR100') parser.add_argument(\"--pruned\", help=\"is the model to be",
"with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(use_loader): inputs, targets = inputs.to(device), targets.to(device)",
"layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3, net.module.layer4]: for",
"train_loss = 0 correct = 0 total = 0 for batch_idx, (inputs, targets)",
"[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) transform_test = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5,",
"correct, total)) acc = 100. * correct/total if acc > best_acc: print(\"best accuracy:\",",
"Parser ######### parser = argparse.ArgumentParser() parser.add_argument(\"-m\", \"--model\", help=\"architecture model to be analyzed\", default='vgg',",
"if(args.train_acc == 'True' or args.test_acc == 'True' or args.finetune == 'True'): transform =",
"Fine-tune def finetune(net): net.train() train_loss = 0 correct = 0 total = 0",
"'False']) parser.add_argument(\"--compression\", help=\"calculate compression ratio for model\", default='False', choices=['True', 'False']) parser.add_argument(\"--eval_ortho\", help=\"evaluate how",
"{num:.2}\".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'mobilenet'): w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat =",
"import datasets, models, transforms import numpy as np import torch.optim as optim import",
"= create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) base_sched, base_epochs, wd = pruned_sched_iter, pruned_epochs_iter, wd_iter best_acc =",
"net_dict['cfg'] net = torch.nn.DataParallel(ResPruned(cfg_p)) else: net = torch.nn.DataParallel(ResNet34()) elif(name == 'resnet-56'): if(is_pruned ==",
"parser.add_argument(\"--compression\", help=\"calculate compression ratio for model\", default='False', choices=['True', 'False']) parser.add_argument(\"--eval_ortho\", help=\"evaluate how orthogonal",
"0.5))]) if(args.data_path=='CIFAR100'): trainset = torchvision.datasets.CIFAR100(root='./../data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True,",
"print(' layer_'+str(mod_id) + ', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1",
"to evaluate different properties ######### # Accuracy def cal_acc(net, use_loader): net.eval() test_loss =",
"lnum in range(13): w_mat = net.module.layers[lnum].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params)",
"print((args.model).upper()) ######### Dataloader ######### if(args.train_acc == 'True' or args.test_acc == 'True' or args.finetune",
"= 100. * correct/total if acc > best_acc: print(\"best accuracy:\", acc) best_acc =",
"torch.nn.DataParallel(MobileNet()) elif(name == 'resnet'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned(cfg_p))",
"create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) base_sched, base_epochs, wd = pruned_sched_iter, pruned_epochs_iter, wd_iter best_acc = 0",
"is\", default='False', choices=['True', 'False']) parser.add_argument(\"--finetune\", help=\"fine-tune a model\", default='False', choices=['True', 'False']) args =",
"= torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item()))",
"'resnet-56'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned_cifar(cfg_p)) else: net =",
"num_workers=2) testset = torchvision.datasets.CIFAR100(root='./../data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)",
"print(\" Conv_base: {num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item())) for lnum in range(13): w_mat = net.module.layers[lnum].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1))",
"%.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) # Orthogonality evaluator def eval_ortho(): if(args.model",
"= 0 correct = 0 total = 0 with torch.no_grad(): for batch_idx, (inputs,",
"| Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) acc = 100. *",
"cal_compression_ratio(net_path, model): temp_path = \"./temp_models/\" base_model = create_model(name=model, is_pruned=False) if os.path.exists(temp_path): shutil.rmtree(temp_path) os.mkdir(temp_path)",
"= optim.SGD(net.parameters(), lr=base_sched[lr_ind], momentum=0.9, weight_decay=wd) while(lr_ind < len(base_sched)): optimizer.param_groups[0]['lr'] = base_sched[lr_ind] for n",
"model\", default='False', choices=['True', 'False']) parser.add_argument(\"--eval_ortho\", help=\"evaluate how orthogonal a model is\", default='False', choices=['True',",
"num_workers=2) testset = datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) # Testing",
"######### if(args.flops == 'True'): cal_flops(net) ######### Compression ratio evaluation ######### if(args.compression == 'True'):",
"net = torch.nn.DataParallel(ResNet34()) elif(name == 'resnet-56'): if(is_pruned == True): cfg_p = net_dict['cfg'] net",
"torch.nn.DataParallel(ResNet34()) elif(name == 'resnet-56'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned_cifar(cfg_p))",
"progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))",
"Functions to evaluate different properties ######### # Accuracy def cal_acc(net, use_loader): net.eval() test_loss",
"loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader),",
"net = torch.nn.DataParallel(ResPruned(cfg_p)) else: net = torch.nn.DataParallel(ResNet34()) elif(name == 'resnet-56'): if(is_pruned == True):",
"the model to be analyzed is stored\", default='0') parser.add_argument(\"--data_path\", help=\"path to dataset\", default='CIFAR100')",
"{num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) w_mat = net.module.layers[lnum].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag",
"targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %",
"or args.flops=='True' or args.eval_ortho=='True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) #########",
"torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:])) else: net = torch.nn.DataParallel(MobileNet()) elif(name == 'resnet'): if(is_pruned == True): cfg_p",
"torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model ==",
"torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item())) for",
"< len(base_sched)): optimizer.param_groups[0]['lr'] = base_sched[lr_ind] for n in range(base_epochs[lr_ind]): print('\\nEpoch: {}'.format(epoch)) finetune(net) test(net)",
"angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id) +",
"default='False', choices=['True', 'False']) parser.add_argument(\"--finetune\", help=\"fine-tune a model\", default='False', choices=['True', 'False']) args = parser.parse_args()",
"= torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) # Testing def test(net): global best_acc net.eval() test_loss",
"for batch_idx, (inputs, targets) in enumerate(use_loader): inputs, targets = inputs.to(device), targets.to(device) outputs =",
"= criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0)",
"'resnet-56'): num_blocks = [9,9,9] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params),",
"outputs = net(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() train_loss += loss.item() _,",
"properties ######### # Accuracy def cal_acc(net, use_loader): net.eval() test_loss = 0 correct =",
"\"--model\", help=\"architecture model to be analyzed\", default='vgg', choices=['vgg', 'mobilenet', 'resnet', 'resnet-56']) parser.add_argument(\"--model_path\", help=\"path",
"= (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[0].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1)",
"* correct/total if acc > best_acc: print(\"best accuracy:\", acc) best_acc = acc #########",
"= 'cuda' if torch.cuda.is_available() else 'cpu' criterion = nn.CrossEntropyLoss() ######### Functions to evaluate",
"shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 elif(args.model == 'resnet-56'): num_blocks",
"except: pass mod_id += 1 elif(args.model == 'resnet-56'): num_blocks = [9,9,9] w_mat =",
"# Compression Ratio def cal_compression_ratio(net_path, model): temp_path = \"./temp_models/\" base_model = create_model(name=model, is_pruned=False)",
"shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR100(root='./../data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False,",
"def test(net): global best_acc net.eval() test_loss = 0 correct = 0 total =",
"= torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_base: {num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item())) for",
"as optim import os import shutil from models import * from pruner import",
"+= predicted.eq(targets).sum().item() return correct / total # FLOPs def cal_flops(net): with torch.cuda.device(0): flops,",
"[3, 7, 10, 14, 17, 21, 24, 28, 31]: w_mat = net.module.features[conv_ind].weight w_mat1",
"{num:.2}\".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item())) for conv_ind in [3, 7, 10, 14, 17, 21, 24, 28,",
"in [3, 7, 10, 14, 17, 21, 24, 28, 31]: w_mat = net.module.features[conv_ind].weight",
"help=\"fine-tune a model\", default='False', choices=['True', 'False']) args = parser.parse_args() device = 'cuda' if",
"config import * from ptflops import get_model_complexity_info import argparse ######### Parser ######### parser",
"default='False', choices=['True', 'False']) parser.add_argument(\"--test_acc\", help=\"evaluate test accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--flops\", help=\"calculate flops",
"FLOPs def cal_flops(net): with torch.cuda.device(0): flops, params = get_model_complexity_info(net, (3, 32, 32), as_strings=False,",
"net.load_state_dict(net_dict['net']) base_sched, base_epochs, wd = pruned_sched_iter, pruned_epochs_iter, wd_iter best_acc = 0 lr_ind =",
"targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, targets) loss.backward()",
"os.path.getsize(net_path) print(\" Compression ratio: {:.3}\".format(base_size / model_size)) shutil.rmtree(temp_path) # Fine-tune def finetune(net): net.train()",
"+ ', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 # Create",
"targets) in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = net(inputs) loss",
"total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%%",
"# Create model for evaluation#net = torch.nn.DataParallel(VGG()) def create_model(name, is_pruned): if(name == 'vgg'):",
"{:.2%}\".format(acc)) ######### Test accuracy evaluation ######### if(args.test_acc == 'True'): acc = cal_acc(net, use_loader=testloader)",
"torchvision from torchvision import datasets, models, transforms import numpy as np import torch.optim",
"######### print((args.model).upper()) ######### Dataloader ######### if(args.train_acc == 'True' or args.test_acc == 'True' or",
"correct = 0 total = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in",
"= inputs.to(device), targets.to(device) outputs = net(inputs) loss = criterion(outputs, targets) test_loss += loss.item()",
"b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params",
"[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) if(args.data_path=='CIFAR100'): trainset = torchvision.datasets.CIFAR100(root='./../data', train=True, download=True,",
"train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) else: trainset = datasets.ImageFolder(root=args.data_path+'/train',",
"transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) # Testing def test(net): global best_acc",
"= torch.nn.DataParallel(ResPruned_cifar(cfg_p)) else: net = torch.nn.DataParallel(ResNet56()) return net ######### Print model name #########",
"= torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) base_sched, base_epochs, wd = pruned_sched_iter, pruned_epochs_iter,",
"correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1),",
"L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -depthwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) w_mat = net.module.layers[lnum].conv2.weight params =",
"is stored\", default='0') parser.add_argument(\"--data_path\", help=\"path to dataset\", default='CIFAR100') parser.add_argument(\"--pruned\", help=\"is the model to",
"cal_acc(net, use_loader=trainloader) print(\" Train accuracy: {:.2%}\".format(acc)) ######### Test accuracy evaluation ######### if(args.test_acc ==",
"if torch.cuda.is_available() else 'cpu' criterion = nn.CrossEntropyLoss() ######### Functions to evaluate different properties",
"args.flops=='True' or args.eval_ortho=='True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) ######### FLOPs",
"'True'): acc = cal_acc(net, use_loader=trainloader) print(\" Train accuracy: {:.2%}\".format(acc)) ######### Test accuracy evaluation",
"test accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--flops\", help=\"calculate flops in a model\", default='False', choices=['True',",
"different properties ######### # Accuracy def cal_acc(net, use_loader): net.eval() test_loss = 0 correct",
"elif(args.model == 'mobilenet'): w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params)",
"for conv_ind in [3, 7, 10, 14, 17, 21, 24, 28, 31]: w_mat",
"w_mat = net.module.layers[lnum].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1))",
"to dataset\", default='CIFAR100') parser.add_argument(\"--pruned\", help=\"is the model to be analyzed a pruned model?\",",
"for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]):",
"== 'mobilenet'): w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag",
"= torch.nn.DataParallel(ResPruned(cfg_p)) else: net = torch.nn.DataParallel(ResNet34()) elif(name == 'resnet-56'): if(is_pruned == True): cfg_p",
"str(b_id)+'_2: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) try: w_mat = module_id[b_id].shortcut[0].weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] <",
"== 'True'): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) transform_test",
"params.t() angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)",
"%.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 elif(args.model == 'resnet-56'): num_blocks =",
"torch.nn.DataParallel(VGG()) elif(name == 'mobilenet'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(MobileNet_p(cfg_p[0],",
"torch.nn.DataParallel(ResNet56()) return net ######### Print model name ######### print((args.model).upper()) ######### Dataloader ######### if(args.train_acc",
"range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t()",
"best_acc net.eval() test_loss = 0 correct = 0 total = 0 with torch.no_grad():",
"import * from pruner import * from config import * from ptflops import",
"= parser.parse_args() device = 'cuda' if torch.cuda.is_available() else 'cpu' criterion = nn.CrossEntropyLoss() #########",
"net.module.layer2, net.module.layer3]: for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1]",
"criterion = nn.CrossEntropyLoss() ######### Functions to evaluate different properties ######### # Accuracy def",
"default='0') parser.add_argument(\"--data_path\", help=\"path to dataset\", default='CIFAR100') parser.add_argument(\"--pruned\", help=\"is the model to be analyzed",
"angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_base: {num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item()))",
"angle_mat = torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -depthwise:",
"def cal_compression_ratio(net_path, model): temp_path = \"./temp_models/\" base_model = create_model(name=model, is_pruned=False) if os.path.exists(temp_path): shutil.rmtree(temp_path)",
"net.module.layers[lnum].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle =",
"optimizer.param_groups[0]['lr'] = base_sched[lr_ind] for n in range(base_epochs[lr_ind]): print('\\nEpoch: {}'.format(epoch)) finetune(net) test(net) epoch +=",
"'mobilenet'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:])) else: net",
"(angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" base layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for module_id",
"= 0 epoch = 0 optimizer = optim.SGD(net.parameters(), lr=base_sched[lr_ind], momentum=0.9, weight_decay=wd) while(lr_ind <",
"= transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) transform_test = transforms.Compose( [transforms.ToTensor(),",
"targets) test_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct +=",
"layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3]: for b_id",
"net = torch.nn.DataParallel(MobileNet()) elif(name == 'resnet'): if(is_pruned == True): cfg_p = net_dict['cfg'] net",
"if(args.flops == 'True'): cal_flops(net) ######### Compression ratio evaluation ######### if(args.compression == 'True'): cal_compression_ratio(net_path=args.model_path,",
"total)) # Orthogonality evaluator def eval_ortho(): if(args.model == 'vgg'): w_mat = net.module.features[0].weight w_mat1",
"def cal_acc(net, use_loader): net.eval() test_loss = 0 correct = 0 total = 0",
"torch.nn.DataParallel(ResPruned(cfg_p)) else: net = torch.nn.DataParallel(ResNet34()) elif(name == 'resnet-56'): if(is_pruned == True): cfg_p =",
"= os.path.getsize(net_path) print(\" Compression ratio: {:.3}\".format(base_size / model_size)) shutil.rmtree(temp_path) # Fine-tune def finetune(net):",
"= torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id) + ',",
"(L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3]: for b_id in",
"0.5, 0.5), (0.5, 0.5, 0.5))]) if(args.data_path=='CIFAR100'): trainset = torchvision.datasets.CIFAR100(root='./../data', train=True, download=True, transform=transform) trainloader",
"if(args.finetune == 'True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) base_sched, base_epochs,",
"params = params.t() angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1))",
"'True'): cal_compression_ratio(net_path=args.model_path, model=args.model) ######### Train accuracy evaluation ######### if(args.train_acc == 'True'): acc =",
"help=\"is the model to be analyzed a pruned model?\", default='False', choices=['True', 'False']) parser.add_argument(\"--train_acc\",",
"== True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned_cifar(cfg_p)) else: net = torch.nn.DataParallel(ResNet56()) return",
"w_mat = module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t() angle_mat",
"0 for batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad()",
"(angle_mat.norm(1)) print(\" base layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for module_id in [net.module.layer1, net.module.layer2,",
"testset = datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) # Testing def",
"total = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs, targets",
"(L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 elif(args.model == 'resnet-56'): num_blocks = [9,9,9] w_mat",
"= outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader), 'Loss: %.3f |",
"= torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle =",
"evaluation ######### if(args.eval_ortho == 'True'): eval_ortho() if(args.finetune == 'True'): net_dict = torch.load(args.model_path) net",
"# -*- coding: utf-8 -*- import torch import torchvision from torchvision import datasets,",
"print(\" Conv_{ind} -depthwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) w_mat = net.module.layers[lnum].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat =",
"== 'True'): acc = cal_acc(net, use_loader=trainloader) print(\" Train accuracy: {:.2%}\".format(acc)) ######### Test accuracy",
"= (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" base layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for",
"ratio for model\", default='False', choices=['True', 'False']) parser.add_argument(\"--eval_ortho\", help=\"evaluate how orthogonal a model is\",",
"total += targets.size(0) correct += predicted.eq(targets).sum().item() return correct / total # FLOPs def",
"module_id[b_id].shortcut[0].weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t() angle_mat = torch.matmul(params,",
"net ######### Print model name ######### print((args.model).upper()) ######### Dataloader ######### if(args.train_acc == 'True'",
"print(\"best accuracy:\", acc) best_acc = acc ######### Load network or create new #########",
"args.test_acc == 'True' or args.flops=='True' or args.eval_ortho=='True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model,",
"w_mat = module_id[b_id].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t() angle_mat",
"angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" base layer:\",",
"(angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_1: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) w_mat =",
"% (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) # Orthogonality evaluator def eval_ortho(): if(args.model == 'vgg'):",
"predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct,",
"correct / total # FLOPs def cal_flops(net): with torch.cuda.device(0): flops, params = get_model_complexity_info(net,",
"= net.module.features[0].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[0].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params =",
"= 0 lr_ind = 0 epoch = 0 optimizer = optim.SGD(net.parameters(), lr=base_sched[lr_ind], momentum=0.9,",
"= (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item())) for conv_ind in [3,",
"torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1))",
"21, 24, 28, 31]: w_mat = net.module.features[conv_ind].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[conv_ind].bias",
"nn.CrossEntropyLoss() ######### Functions to evaluate different properties ######### # Accuracy def cal_acc(net, use_loader):",
"torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -pointwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item()))",
"'mobilenet'): w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag =",
"parser.add_argument(\"--flops\", help=\"calculate flops in a model\", default='False', choices=['True', 'False']) parser.add_argument(\"--compression\", help=\"calculate compression ratio",
"default='False', choices=['True', 'False']) parser.add_argument(\"--compression\", help=\"calculate compression ratio for model\", default='False', choices=['True', 'False']) parser.add_argument(\"--eval_ortho\",",
"Conv_base: {num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item())) for lnum in range(13): w_mat = net.module.layers[lnum].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat",
"layer_'+str(mod_id)+', block', str(b_id)+'_2: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) try: w_mat = module_id[b_id].shortcut[0].weight params = (w_mat.reshape(w_mat.shape[0],-1))",
"(b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1))",
"0.5), (0.5, 0.5, 0.5))]) transform_test = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5,",
"help=\"calculate flops in a model\", default='False', choices=['True', 'False']) parser.add_argument(\"--compression\", help=\"calculate compression ratio for",
"(inputs, targets) in enumerate(testloader): inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss",
"or args.test_acc == 'True' or args.flops=='True' or args.eval_ortho=='True'): net_dict = torch.load(args.model_path) net =",
"'False']) parser.add_argument(\"--finetune\", help=\"fine-tune a model\", default='False', choices=['True', 'False']) args = parser.parse_args() device =",
"num=(L_diag.cpu()/L_angle.cpu()).item())) for conv_ind in [3, 7, 10, 14, 17, 21, 24, 28, 31]:",
"layer_'+str(mod_id)+', block', str(b_id)+'_1: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) w_mat = module_id[b_id].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1]",
"net.module.layer3]: for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] <",
"transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR100(root='./../data', train=False, download=True, transform=transform_test)",
"Orthogonality evaluation ######### if(args.eval_ortho == 'True'): eval_ortho() if(args.finetune == 'True'): net_dict = torch.load(args.model_path)",
"= torch.nn.DataParallel(VGG_p(cfg_p)) else: net = torch.nn.DataParallel(VGG()) elif(name == 'mobilenet'): if(is_pruned == True): cfg_p",
"help=\"path where the model to be analyzed is stored\", default='0') parser.add_argument(\"--data_path\", help=\"path to",
"= 0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3]: for b_id in range(num_blocks[mod_id]): w_mat",
"-*- coding: utf-8 -*- import torch import torchvision from torchvision import datasets, models,",
"== 'resnet'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned(cfg_p)) else: net",
"new ######### if(args.train_acc == 'True' or args.test_acc == 'True' or args.flops=='True' or args.eval_ortho=='True'):",
"base layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3]: for",
"Test accuracy evaluation ######### if(args.test_acc == 'True'): acc = cal_acc(net, use_loader=testloader) print(\" Test",
"else: net = torch.nn.DataParallel(ResNet56()) return net ######### Print model name ######### print((args.model).upper()) #########",
"download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR100(root='./../data', train=False, download=True,",
"(angle_mat.norm(1)) print(\" Conv_{ind} -pointwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'resnet'): num_blocks = [3,4,6,3] w_mat",
"= (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id) + ', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item())",
"train accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--test_acc\", help=\"evaluate test accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--flops\",",
"len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) #",
"choices=['True', 'False']) parser.add_argument(\"--compression\", help=\"calculate compression ratio for model\", default='False', choices=['True', 'False']) parser.add_argument(\"--eval_ortho\", help=\"evaluate",
"outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc:",
"'True'): eval_ortho() if(args.finetune == 'True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net'])",
"w_mat = net.module.features[conv_ind].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[conv_ind].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params",
"Create model for evaluation#net = torch.nn.DataParallel(VGG()) def create_model(name, is_pruned): if(name == 'vgg'): if(is_pruned",
"L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_base: {num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item())) for lnum in range(13):",
"if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned_cifar(cfg_p)) else: net = torch.nn.DataParallel(ResNet56())",
"######### Dataloader ######### if(args.train_acc == 'True' or args.test_acc == 'True' or args.finetune ==",
"transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) transform_test = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5),",
"while(lr_ind < len(base_sched)): optimizer.param_groups[0]['lr'] = base_sched[lr_ind] for n in range(base_epochs[lr_ind]): print('\\nEpoch: {}'.format(epoch)) finetune(net)",
"Testing def test(net): global best_acc net.eval() test_loss = 0 correct = 0 total",
"w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[0].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1),",
"if(args.train_acc == 'True'): acc = cal_acc(net, use_loader=trainloader) print(\" Train accuracy: {:.2%}\".format(acc)) ######### Test",
"{'net': base_model.state_dict()} torch.save(state, temp_path+'temp_base.pth') base_size = os.path.getsize(temp_path+'temp_base.pth') model_size = os.path.getsize(net_path) print(\" Compression ratio:",
"######### Compression ratio evaluation ######### if(args.compression == 'True'): cal_compression_ratio(net_path=args.model_path, model=args.model) ######### Train accuracy",
"to be analyzed a pruned model?\", default='False', choices=['True', 'False']) parser.add_argument(\"--train_acc\", help=\"evaluate train accuracy\",",
"torch.optim as optim import os import shutil from models import * from pruner",
"as np import torch.optim as optim import os import shutil from models import",
"torch import torchvision from torchvision import datasets, models, transforms import numpy as np",
"FLOPs: {:<8}'.format(flops)) # Compression Ratio def cal_compression_ratio(net_path, model): temp_path = \"./temp_models/\" base_model =",
"L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'mobilenet'): w_mat = net.module.conv1.weight",
"= net_dict['cfg'] net = torch.nn.DataParallel(ResPruned(cfg_p)) else: net = torch.nn.DataParallel(ResNet34()) elif(name == 'resnet-56'): if(is_pruned",
"= argparse.ArgumentParser() parser.add_argument(\"-m\", \"--model\", help=\"architecture model to be analyzed\", default='vgg', choices=['vgg', 'mobilenet', 'resnet',",
"= (angle_mat.norm(1)) print(\" Conv_{ind} -pointwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'resnet'): num_blocks = [3,4,6,3]",
"(angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'mobilenet'): w_mat =",
"1 # Create model for evaluation#net = torch.nn.DataParallel(VGG()) def create_model(name, is_pruned): if(name ==",
"args.finetune == 'True'): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])",
"loss = criterion(outputs, targets) test_loss += loss.item() _, predicted = outputs.max(1) total +=",
"datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) # Testing def test(net): global",
"L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id) + ', shortcut: %.2f' %",
"10, 14, 17, 21, 24, 28, 31]: w_mat = net.module.features[conv_ind].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1))",
"dim=1) angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}:",
"L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -pointwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model ==",
"torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_1: %.2f' %",
"parser.add_argument(\"--finetune\", help=\"fine-tune a model\", default='False', choices=['True', 'False']) args = parser.parse_args() device = 'cuda'",
"shutil from models import * from pruner import * from config import *",
"for n in range(base_epochs[lr_ind]): print('\\nEpoch: {}'.format(epoch)) finetune(net) test(net) epoch += 1 lr_ind +=",
"create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) ######### FLOPs evaluation ######### if(args.flops == 'True'): cal_flops(net) ######### Compression",
"lr_ind = 0 epoch = 0 optimizer = optim.SGD(net.parameters(), lr=base_sched[lr_ind], momentum=0.9, weight_decay=wd) while(lr_ind",
"predicted.eq(targets).sum().item() return correct / total # FLOPs def cal_flops(net): with torch.cuda.device(0): flops, params",
"ratio evaluation ######### if(args.compression == 'True'): cal_compression_ratio(net_path=args.model_path, model=args.model) ######### Train accuracy evaluation #########",
"transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) transform_test = transforms.Compose(",
"| Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) # Orthogonality evaluator def",
"in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = net(inputs) loss =",
"help=\"path to dataset\", default='CIFAR100') parser.add_argument(\"--pruned\", help=\"is the model to be analyzed a pruned",
"L_angle = (angle_mat.norm(1)) print(\" base layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for module_id in",
"params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" base layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id =",
"best_acc = 0 lr_ind = 0 epoch = 0 optimizer = optim.SGD(net.parameters(), lr=base_sched[lr_ind],",
"or args.finetune == 'True'): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5,",
"net_dict['cfg'] net = torch.nn.DataParallel(ResPruned_cifar(cfg_p)) else: net = torch.nn.DataParallel(ResNet56()) return net ######### Print model",
"w_mat = net.module.features[0].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[0].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params",
"= torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle =",
"28, 31]: w_mat = net.module.features[conv_ind].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[conv_ind].bias b_mat1 =",
"= (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_2: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) try:",
"model): temp_path = \"./temp_models/\" base_model = create_model(name=model, is_pruned=False) if os.path.exists(temp_path): shutil.rmtree(temp_path) os.mkdir(temp_path) state",
"cal_acc(net, use_loader): net.eval() test_loss = 0 correct = 0 total = 0 with",
"default='False', choices=['True', 'False']) parser.add_argument(\"--flops\", help=\"calculate flops in a model\", default='False', choices=['True', 'False']) parser.add_argument(\"--compression\",",
"optimizer.step() train_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct +=",
"(L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3, net.module.layer4]: for b_id",
"L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_2: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) try: w_mat =",
"elif(args.model == 'resnet'): num_blocks = [3,4,6,3] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat",
"torch.nn.DataParallel(VGG()) def create_model(name, is_pruned): if(name == 'vgg'): if(is_pruned == True): cfg_p = net_dict['cfg']",
"torch.cuda.is_available() else 'cpu' criterion = nn.CrossEntropyLoss() ######### Functions to evaluate different properties #########",
"correct = 0 total = 0 for batch_idx, (inputs, targets) in enumerate(trainloader): inputs,",
"+= predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total,",
"= transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) if(args.data_path=='CIFAR100'): trainset = torchvision.datasets.CIFAR100(root='./../data',",
"create new ######### if(args.train_acc == 'True' or args.test_acc == 'True' or args.flops=='True' or",
"params = get_model_complexity_info(net, (3, 32, 32), as_strings=False, print_per_layer_stat=False) print(' FLOPs: {:<8}'.format(flops)) # Compression",
"= (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_1: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) w_mat = module_id[b_id].conv2.weight params",
"# Testing def test(net): global best_acc net.eval() test_loss = 0 correct = 0",
"'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) acc =",
"torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(use_loader): inputs, targets = inputs.to(device), targets.to(device) outputs",
"24, 28, 31]: w_mat = net.module.features[conv_ind].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[conv_ind].bias b_mat1",
"= (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'mobilenet'): w_mat",
"(w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[0].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat",
"def cal_flops(net): with torch.cuda.device(0): flops, params = get_model_complexity_info(net, (3, 32, 32), as_strings=False, print_per_layer_stat=False)",
"(0.5, 0.5, 0.5))]) if(args.data_path=='CIFAR100'): trainset = torchvision.datasets.CIFAR100(root='./../data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset,",
"Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) acc = 100. * correct/total",
"layer_'+str(mod_id) + ', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 #",
"+= 1 # Create model for evaluation#net = torch.nn.DataParallel(VGG()) def create_model(name, is_pruned): if(name",
"'cuda' if torch.cuda.is_available() else 'cpu' criterion = nn.CrossEntropyLoss() ######### Functions to evaluate different",
"default='False', choices=['True', 'False']) parser.add_argument(\"--train_acc\", help=\"evaluate train accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--test_acc\", help=\"evaluate test",
"(w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_base:",
"default='CIFAR100') parser.add_argument(\"--pruned\", help=\"is the model to be analyzed a pruned model?\", default='False', choices=['True',",
"testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) else: trainset = datasets.ImageFolder(root=args.data_path+'/train', transform=transform) trainloader =",
"torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id) + ', shortcut: %.2f'",
"0 epoch = 0 optimizer = optim.SGD(net.parameters(), lr=base_sched[lr_ind], momentum=0.9, weight_decay=wd) while(lr_ind < len(base_sched)):",
"help=\"calculate compression ratio for model\", default='False', choices=['True', 'False']) parser.add_argument(\"--eval_ortho\", help=\"evaluate how orthogonal a",
"/ model_size)) shutil.rmtree(temp_path) # Fine-tune def finetune(net): net.train() train_loss = 0 correct =",
"net.module.layer2, net.module.layer3, net.module.layer4]: for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1))",
"base_sched, base_epochs, wd = pruned_sched_iter, pruned_epochs_iter, wd_iter best_acc = 0 lr_ind = 0",
"b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(params, torch.t(params)) L_diag",
"optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() train_loss += loss.item()",
"'cpu' criterion = nn.CrossEntropyLoss() ######### Functions to evaluate different properties ######### # Accuracy",
"print(' FLOPs: {:<8}'.format(flops)) # Compression Ratio def cal_compression_ratio(net_path, model): temp_path = \"./temp_models/\" base_model",
"Compression Ratio def cal_compression_ratio(net_path, model): temp_path = \"./temp_models/\" base_model = create_model(name=model, is_pruned=False) if",
"= (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[conv_ind].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1)",
"if(name == 'vgg'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(VGG_p(cfg_p)) else:",
"choices=['True', 'False']) parser.add_argument(\"--finetune\", help=\"fine-tune a model\", default='False', choices=['True', 'False']) args = parser.parse_args() device",
"inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() train_loss",
"torchvision import datasets, models, transforms import numpy as np import torch.optim as optim",
"'False']) parser.add_argument(\"--flops\", help=\"calculate flops in a model\", default='False', choices=['True', 'False']) parser.add_argument(\"--compression\", help=\"calculate compression",
"= outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader), 'Loss: %.3f |",
"if(args.eval_ortho == 'True'): eval_ortho() if(args.finetune == 'True'): net_dict = torch.load(args.model_path) net = create_model(name=args.model,",
"choices=['True', 'False']) args = parser.parse_args() device = 'cuda' if torch.cuda.is_available() else 'cpu' criterion",
"from models import * from pruner import * from config import * from",
"net.eval() test_loss = 0 correct = 0 total = 0 with torch.no_grad(): for",
"######### if(args.train_acc == 'True' or args.test_acc == 'True' or args.finetune == 'True'): transform",
"from config import * from ptflops import get_model_complexity_info import argparse ######### Parser #########",
"evaluation ######### if(args.compression == 'True'): cal_compression_ratio(net_path=args.model_path, model=args.model) ######### Train accuracy evaluation ######### if(args.train_acc",
"'resnet', 'resnet-56']) parser.add_argument(\"--model_path\", help=\"path where the model to be analyzed is stored\", default='0')",
"net.module.layer3, net.module.layer4]: for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1]",
"testset = torchvision.datasets.CIFAR100(root='./../data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) else:",
"cal_acc(net, use_loader=testloader) print(\" Test accuracy: {:.2%}\".format(acc)) ######### Orthogonality evaluation ######### if(args.eval_ortho == 'True'):",
"for batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs",
"/ total # FLOPs def cal_flops(net): with torch.cuda.device(0): flops, params = get_model_complexity_info(net, (3,",
"Train accuracy: {:.2%}\".format(acc)) ######### Test accuracy evaluation ######### if(args.test_acc == 'True'): acc =",
"= net.module.features[conv_ind].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(params,",
"torch.save(state, temp_path+'temp_base.pth') base_size = os.path.getsize(temp_path+'temp_base.pth') model_size = os.path.getsize(net_path) print(\" Compression ratio: {:.3}\".format(base_size /",
"Compression ratio: {:.3}\".format(base_size / model_size)) shutil.rmtree(temp_path) # Fine-tune def finetune(net): net.train() train_loss =",
"b_mat = net.module.features[0].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat =",
"net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle =",
"enumerate(use_loader): inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss = criterion(outputs, targets)",
"w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[conv_ind].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1),",
"evaluator def eval_ortho(): if(args.model == 'vgg'): w_mat = net.module.features[0].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat",
"= (angle_mat.norm(1)) print(\" base layer:\", (L_diag.cpu()/L_angle.cpu()).item()) mod_id = 0 for module_id in [net.module.layer1,",
"L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_2: %.2f' % (L_diag.cpu()/L_angle.cpu()).item())",
"from torchvision import datasets, models, transforms import numpy as np import torch.optim as",
"is_pruned): if(name == 'vgg'): if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(VGG_p(cfg_p))",
"Orthogonality evaluator def eval_ortho(): if(args.model == 'vgg'): w_mat = net.module.features[0].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1))",
"0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3, net.module.layer4]: for b_id in range(num_blocks[mod_id]): w_mat",
"transforms import numpy as np import torch.optim as optim import os import shutil",
"+= 1 elif(args.model == 'resnet-56'): num_blocks = [9,9,9] w_mat = net.module.conv1.weight params =",
"= create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) ######### FLOPs evaluation ######### if(args.flops == 'True'): cal_flops(net) #########",
"for module_id in [net.module.layer1, net.module.layer2, net.module.layer3]: for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight",
"L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id) + ', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass",
"num_workers=2) else: trainset = datasets.ImageFolder(root=args.data_path+'/train', transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset",
"net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) ######### FLOPs evaluation ######### if(args.flops",
"default='False', choices=['True', 'False']) parser.add_argument(\"--eval_ortho\", help=\"evaluate how orthogonal a model is\", default='False', choices=['True', 'False'])",
"params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item())) for conv_ind",
"100. * correct/total if acc > best_acc: print(\"best accuracy:\", acc) best_acc = acc",
"transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) else: trainset = datasets.ImageFolder(root=args.data_path+'/train', transform=transform) trainloader",
"(angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=0, num=(L_diag.cpu()/L_angle.cpu()).item())) for conv_ind in [3, 7,",
"Load network or create new ######### if(args.train_acc == 'True' or args.test_acc == 'True'",
"+= loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() return",
"args = parser.parse_args() device = 'cuda' if torch.cuda.is_available() else 'cpu' criterion = nn.CrossEntropyLoss()",
"as_strings=False, print_per_layer_stat=False) print(' FLOPs: {:<8}'.format(flops)) # Compression Ratio def cal_compression_ratio(net_path, model): temp_path =",
"= [3,4,6,3] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag",
"= torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_2:",
"return net ######### Print model name ######### print((args.model).upper()) ######### Dataloader ######### if(args.train_acc ==",
"{:.2%}\".format(acc)) ######### Orthogonality evaluation ######### if(args.eval_ortho == 'True'): eval_ortho() if(args.finetune == 'True'): net_dict",
"L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=conv_ind, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'mobilenet'):",
"import torch import torchvision from torchvision import datasets, models, transforms import numpy as",
"help=\"evaluate how orthogonal a model is\", default='False', choices=['True', 'False']) parser.add_argument(\"--finetune\", help=\"fine-tune a model\",",
"inputs.to(device), targets.to(device) outputs = net(inputs) loss = criterion(outputs, targets) test_loss += loss.item() _,",
"(angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -depthwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) w_mat = net.module.layers[lnum].conv2.weight params",
"angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}: {num:.2}\".format(ind=conv_ind,",
"Conv_{ind} -depthwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) w_mat = net.module.layers[lnum].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(),",
"params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1))",
"(L_diag.cpu()/L_angle.cpu()).item()) try: w_mat = module_id[b_id].shortcut[0].weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params =",
"# Fine-tune def finetune(net): net.train() train_loss = 0 correct = 0 total =",
"L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -depthwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) w_mat =",
"{:.3}\".format(base_size / model_size)) shutil.rmtree(temp_path) # Fine-tune def finetune(net): net.train() train_loss = 0 correct",
"######### if(args.eval_ortho == 'True'): eval_ortho() if(args.finetune == 'True'): net_dict = torch.load(args.model_path) net =",
"True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:])) else: net = torch.nn.DataParallel(MobileNet()) elif(name",
"model\", default='False', choices=['True', 'False']) parser.add_argument(\"--compression\", help=\"calculate compression ratio for model\", default='False', choices=['True', 'False'])",
"== True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned(cfg_p)) else: net = torch.nn.DataParallel(ResNet34()) elif(name",
"module_id[b_id].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1)) if(params.shape[1] < params.shape[0]): params = params.t() angle_mat = torch.matmul(params,",
"total = 0 for batch_idx, (inputs, targets) in enumerate(trainloader): inputs, targets = inputs.to(device),",
"flops in a model\", default='False', choices=['True', 'False']) parser.add_argument(\"--compression\", help=\"calculate compression ratio for model\",",
"mod_id = 0 for module_id in [net.module.layer1, net.module.layer2, net.module.layer3, net.module.layer4]: for b_id in",
"cal_compression_ratio(net_path=args.model_path, model=args.model) ######### Train accuracy evaluation ######### if(args.train_acc == 'True'): acc = cal_acc(net,",
"torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1))",
"lr=base_sched[lr_ind], momentum=0.9, weight_decay=wd) while(lr_ind < len(base_sched)): optimizer.param_groups[0]['lr'] = base_sched[lr_ind] for n in range(base_epochs[lr_ind]):",
"######### Functions to evaluate different properties ######### # Accuracy def cal_acc(net, use_loader): net.eval()",
"'False']) parser.add_argument(\"--eval_ortho\", help=\"evaluate how orthogonal a model is\", default='False', choices=['True', 'False']) parser.add_argument(\"--finetune\", help=\"fine-tune",
"optim import os import shutil from models import * from pruner import *",
"if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(VGG_p(cfg_p)) else: net = torch.nn.DataParallel(VGG())",
"== 'resnet-56'): num_blocks = [9,9,9] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat =",
"a model is\", default='False', choices=['True', 'False']) parser.add_argument(\"--finetune\", help=\"fine-tune a model\", default='False', choices=['True', 'False'])",
"create_model(name, is_pruned): if(name == 'vgg'): if(is_pruned == True): cfg_p = net_dict['cfg'] net =",
"with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): inputs, targets = inputs.to(device), targets.to(device)",
"True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(VGG_p(cfg_p)) else: net = torch.nn.DataParallel(VGG()) elif(name ==",
"test_loss = 0 correct = 0 total = 0 with torch.no_grad(): for batch_idx,",
"torch.nn.DataParallel(ResPruned_cifar(cfg_p)) else: net = torch.nn.DataParallel(ResNet56()) return net ######### Print model name ######### print((args.model).upper())",
"create_model(name=model, is_pruned=False) if os.path.exists(temp_path): shutil.rmtree(temp_path) os.mkdir(temp_path) state = {'net': base_model.state_dict()} torch.save(state, temp_path+'temp_base.pth') base_size",
"[3,4,6,3] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag =",
"analyzed a pruned model?\", default='False', choices=['True', 'False']) parser.add_argument(\"--train_acc\", help=\"evaluate train accuracy\", default='False', choices=['True',",
"import argparse ######### Parser ######### parser = argparse.ArgumentParser() parser.add_argument(\"-m\", \"--model\", help=\"architecture model to",
"'True' or args.finetune == 'True'): transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5,",
"net_dict['cfg'] net = torch.nn.DataParallel(VGG_p(cfg_p)) else: net = torch.nn.DataParallel(VGG()) elif(name == 'mobilenet'): if(is_pruned ==",
"transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) transform_test = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5,",
"shuffle=True, num_workers=2) testset = datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) #",
"if(args.train_acc == 'True' or args.test_acc == 'True' or args.flops=='True' or args.eval_ortho=='True'): net_dict =",
"= os.path.getsize(temp_path+'temp_base.pth') model_size = os.path.getsize(net_path) print(\" Compression ratio: {:.3}\".format(base_size / model_size)) shutil.rmtree(temp_path) #",
"a model\", default='False', choices=['True', 'False']) parser.add_argument(\"--compression\", help=\"calculate compression ratio for model\", default='False', choices=['True',",
"loss.backward() optimizer.step() train_loss += loss.item() _, predicted = outputs.max(1) total += targets.size(0) correct",
"if(is_pruned == True): cfg_p = net_dict['cfg'] net = torch.nn.DataParallel(ResPruned(cfg_p)) else: net = torch.nn.DataParallel(ResNet34())",
"######### Load network or create new ######### if(args.train_acc == 'True' or args.test_acc ==",
"+= targets.size(0) correct += predicted.eq(targets).sum().item() return correct / total # FLOPs def cal_flops(net):",
"= 0 total = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader):",
"total)) acc = 100. * correct/total if acc > best_acc: print(\"best accuracy:\", acc)",
"range(13): w_mat = net.module.layers[lnum].conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag =",
"batch_size=128, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR100(root='./../data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128,",
"in enumerate(testloader): inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss = criterion(outputs,",
"(%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) acc = 100. * correct/total if acc",
"0.5), (0.5, 0.5, 0.5))]) if(args.data_path=='CIFAR100'): trainset = torchvision.datasets.CIFAR100(root='./../data', train=True, download=True, transform=transform) trainloader =",
"net = torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:])) else: net = torch.nn.DataParallel(MobileNet()) elif(name == 'resnet'): if(is_pruned ==",
"0.5, 0.5), (0.5, 0.5, 0.5))]) transform_test = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5,",
"def eval_ortho(): if(args.model == 'vgg'): w_mat = net.module.features[0].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat =",
"is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) base_sched, base_epochs, wd = pruned_sched_iter, pruned_epochs_iter, wd_iter best_acc = 0 lr_ind",
"1 elif(args.model == 'resnet-56'): num_blocks = [9,9,9] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1))",
"= torch.nn.DataParallel(ResNet34()) elif(name == 'resnet-56'): if(is_pruned == True): cfg_p = net_dict['cfg'] net =",
"Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) # Orthogonality evaluator def eval_ortho():",
"net.module.features[0].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[0].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1,",
"b_mat1), dim=1) angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\"",
"torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id) + ', shortcut:",
"0.5, 0.5))]) if(args.data_path=='CIFAR100'): trainset = torchvision.datasets.CIFAR100(root='./../data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,",
"Conv_{ind} -pointwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'resnet'): num_blocks = [3,4,6,3] w_mat = net.module.conv1.weight",
"(angle_mat.norm(1)) print(\" Conv_base: {num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item())) for lnum in range(13): w_mat = net.module.layers[lnum].conv1.weight params =",
"if(args.data_path=='CIFAR100'): trainset = torchvision.datasets.CIFAR100(root='./../data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)",
"(train_loss/(batch_idx+1), 100.*correct/total, correct, total)) # Orthogonality evaluator def eval_ortho(): if(args.model == 'vgg'): w_mat",
"[9,9,9] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params) L_diag =",
"use_loader=trainloader) print(\" Train accuracy: {:.2%}\".format(acc)) ######### Test accuracy evaluation ######### if(args.test_acc == 'True'):",
"total # FLOPs def cal_flops(net): with torch.cuda.device(0): flops, params = get_model_complexity_info(net, (3, 32,",
"correct, total)) # Orthogonality evaluator def eval_ortho(): if(args.model == 'vgg'): w_mat = net.module.features[0].weight",
"return correct / total # FLOPs def cal_flops(net): with torch.cuda.device(0): flops, params =",
"params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle",
"net.load_state_dict(net_dict['net']) ######### FLOPs evaluation ######### if(args.flops == 'True'): cal_flops(net) ######### Compression ratio evaluation",
"best_acc = acc ######### Load network or create new ######### if(args.train_acc == 'True'",
"* from pruner import * from config import * from ptflops import get_model_complexity_info",
"_, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(trainloader), 'Loss:",
"params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_base: {num:.2}\".format(num=(L_diag.cpu()/L_angle.cpu()).item())) for lnum in",
"= datasets.ImageFolder(root=args.data_path+'/test', transform=transform_test) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) # Testing def test(net):",
"params.shape[0]): params = params.t() angle_mat = torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle =",
"= acc ######### Load network or create new ######### if(args.train_acc == 'True' or",
"'True'): cal_flops(net) ######### Compression ratio evaluation ######### if(args.compression == 'True'): cal_compression_ratio(net_path=args.model_path, model=args.model) #########",
"net_dict = torch.load(args.model_path) net = create_model(name=args.model, is_pruned=(args.pruned=='True')) net.load_state_dict(net_dict['net']) base_sched, base_epochs, wd = pruned_sched_iter,",
"in [net.module.layer1, net.module.layer2, net.module.layer3, net.module.layer4]: for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params",
"= torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) else: trainset = datasets.ImageFolder(root=args.data_path+'/train', transform=transform) trainloader = torch.utils.data.DataLoader(trainset,",
"= cal_acc(net, use_loader=testloader) print(\" Test accuracy: {:.2%}\".format(acc)) ######### Orthogonality evaluation ######### if(args.eval_ortho ==",
"be analyzed is stored\", default='0') parser.add_argument(\"--data_path\", help=\"path to dataset\", default='CIFAR100') parser.add_argument(\"--pruned\", help=\"is the",
"'True' or args.test_acc == 'True' or args.flops=='True' or args.eval_ortho=='True'): net_dict = torch.load(args.model_path) net",
"= net_dict['cfg'] net = torch.nn.DataParallel(MobileNet_p(cfg_p[0], cfg_p[1:])) else: net = torch.nn.DataParallel(MobileNet()) elif(name == 'resnet'):",
"correct += predicted.eq(targets).sum().item() return correct / total # FLOPs def cal_flops(net): with torch.cuda.device(0):",
"accuracy\", default='False', choices=['True', 'False']) parser.add_argument(\"--flops\", help=\"calculate flops in a model\", default='False', choices=['True', 'False'])",
"dim=1) angle_mat = torch.matmul(torch.t(params), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind}:",
"dataset\", default='CIFAR100') parser.add_argument(\"--pruned\", help=\"is the model to be analyzed a pruned model?\", default='False',",
"model to be analyzed\", default='vgg', choices=['vgg', 'mobilenet', 'resnet', 'resnet-56']) parser.add_argument(\"--model_path\", help=\"path where the",
"print(' layer_'+str(mod_id)+', block', str(b_id)+'_1: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) w_mat = module_id[b_id].conv2.weight params = (w_mat.reshape(w_mat.shape[0],-1))",
"optimizer = optim.SGD(net.parameters(), lr=base_sched[lr_ind], momentum=0.9, weight_decay=wd) while(lr_ind < len(base_sched)): optimizer.param_groups[0]['lr'] = base_sched[lr_ind] for",
"import shutil from models import * from pruner import * from config import",
"acc = 100. * correct/total if acc > best_acc: print(\"best accuracy:\", acc) best_acc",
"torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -depthwise: {num:.2}\".format(ind=lnum, num=(L_diag.cpu()/L_angle.cpu()).item()))",
"= torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(\" Conv_{ind} -depthwise: {num:.2}\".format(ind=lnum,",
"14, 17, 21, 24, 28, 31]: w_mat = net.module.features[conv_ind].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat",
"(w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[conv_ind].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat",
"accuracy:\", acc) best_acc = acc ######### Load network or create new ######### if(args.train_acc",
"= (b_mat.reshape(b_mat.shape[0],-1)) params = torch.cat((w_mat1, b_mat1), dim=1) angle_mat = torch.matmul(torch.t(params), params) L_diag =",
"import * from config import * from ptflops import get_model_complexity_info import argparse #########",
"[net.module.layer1, net.module.layer2, net.module.layer3, net.module.layer4]: for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight params =",
"num_blocks = [9,9,9] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(torch.t(params), params)",
"######### if(args.compression == 'True'): cal_compression_ratio(net_path=args.model_path, model=args.model) ######### Train accuracy evaluation ######### if(args.train_acc ==",
"parser.add_argument(\"--data_path\", help=\"path to dataset\", default='CIFAR100') parser.add_argument(\"--pruned\", help=\"is the model to be analyzed a",
"how orthogonal a model is\", default='False', choices=['True', 'False']) parser.add_argument(\"--finetune\", help=\"fine-tune a model\", default='False',",
"batch_idx, (inputs, targets) in enumerate(use_loader): inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs)",
"= create_model(name=model, is_pruned=False) if os.path.exists(temp_path): shutil.rmtree(temp_path) os.mkdir(temp_path) state = {'net': base_model.state_dict()} torch.save(state, temp_path+'temp_base.pth')",
"torch.matmul(params, torch.t(params)) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_2: %.2f'",
"module_id in [net.module.layer1, net.module.layer2, net.module.layer3, net.module.layer4]: for b_id in range(num_blocks[mod_id]): w_mat = module_id[b_id].conv1.weight",
"params = (w_mat.reshape(w_mat.shape[0],-1)) angle_mat = torch.matmul(params.t(), params) L_diag = (angle_mat.diag().norm(1)) L_angle = (angle_mat.norm(1))",
"31]: w_mat = net.module.features[conv_ind].weight w_mat1 = (w_mat.reshape(w_mat.shape[0],-1)) b_mat = net.module.features[conv_ind].bias b_mat1 = (b_mat.reshape(b_mat.shape[0],-1))",
"total += targets.size(0) correct += predicted.eq(targets).sum().item() progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%%",
"num=(L_diag.cpu()/L_angle.cpu()).item())) elif(args.model == 'resnet'): num_blocks = [3,4,6,3] w_mat = net.module.conv1.weight params = (w_mat.reshape(w_mat.shape[0],-1))",
"loss = criterion(outputs, targets) loss.backward() optimizer.step() train_loss += loss.item() _, predicted = outputs.max(1)",
"', shortcut: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) except: pass mod_id += 1 elif(args.model == 'resnet-56'):",
"(angle_mat.norm(1)) print(' layer_'+str(mod_id)+', block', str(b_id)+'_1: %.2f' % (L_diag.cpu()/L_angle.cpu()).item()) w_mat = module_id[b_id].conv2.weight params =",
"%.3f | Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) # Orthogonality evaluator",
"cal_flops(net) ######### Compression ratio evaluation ######### if(args.compression == 'True'): cal_compression_ratio(net_path=args.model_path, model=args.model) ######### Train",
"'mobilenet', 'resnet', 'resnet-56']) parser.add_argument(\"--model_path\", help=\"path where the model to be analyzed is stored\",",
"model for evaluation#net = torch.nn.DataParallel(VGG()) def create_model(name, is_pruned): if(name == 'vgg'): if(is_pruned ==",
"is_pruned=False) if os.path.exists(temp_path): shutil.rmtree(temp_path) os.mkdir(temp_path) state = {'net': base_model.state_dict()} torch.save(state, temp_path+'temp_base.pth') base_size ="
] |
[
"if rc == self.STATUS_SUCCESS: self.module.exit_json(changed=True) elif rc == self.STATUS_NOT_CHANGED: self.module.exit_json(changed=False) else: self.module.fail_json(msg =",
"dict(type='str') ) ) def build_command(self): params = self.module.params command = [ self.module.get_bin_path('firebrew'), {'present':",
"coding: utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import from __future__",
"def build_command(self): params = self.module.params command = [ self.module.get_bin_path('firebrew'), {'present': 'install', 'absent': 'uninstall'}[params['state']],",
"= 1 STATUS_NOT_CHANGED = 2 def __init__(self, AnsibleModule = AnsibleModule): self.module = AnsibleModule(",
"default='present', choices=['present', 'absent']), name = dict(type='str', required=True), base_dir = dict(type='str'), profile = dict(type='str'),",
"= dict(type='str', default='present', choices=['present', 'absent']), name = dict(type='str', required=True), base_dir = dict(type='str'), profile",
"['base_dir','profile','firefox']: if opt in params and params[opt] != None and params[opt].strip() != '':",
"self.module.run_command(self.build_command()) if rc == self.STATUS_SUCCESS: self.module.exit_json(changed=True) elif rc == self.STATUS_NOT_CHANGED: self.module.exit_json(changed=False) else: self.module.fail_json(msg",
"name = dict(type='str', required=True), base_dir = dict(type='str'), profile = dict(type='str'), firefox = dict(type='str')",
"__init__(self, AnsibleModule = AnsibleModule): self.module = AnsibleModule( argument_spec = dict( state = dict(type='str',",
"= self.module.params command = [ self.module.get_bin_path('firebrew'), {'present': 'install', 'absent': 'uninstall'}[params['state']], pipes.quote(params['name']) ] for",
"AnsibleModule = AnsibleModule): self.module = AnsibleModule( argument_spec = dict( state = dict(type='str', default='present',",
"python # -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import",
"= 2 def __init__(self, AnsibleModule = AnsibleModule): self.module = AnsibleModule( argument_spec = dict(",
"-*- from __future__ import unicode_literals from __future__ import absolute_import from __future__ import generators",
"import division import pipes from ansible.module_utils.basic import * class Firebrew(object): STATUS_SUCCESS = 0",
"* class Firebrew(object): STATUS_SUCCESS = 0 STATUS_FAILURE = 1 STATUS_NOT_CHANGED = 2 def",
"STATUS_FAILURE = 1 STATUS_NOT_CHANGED = 2 def __init__(self, AnsibleModule = AnsibleModule): self.module =",
"= dict(type='str', required=True), base_dir = dict(type='str'), profile = dict(type='str'), firefox = dict(type='str') )",
"'': command.append('--%s=%s' % (opt.replace('_','-'), pipes.quote(params[opt]))) return ' '.join(command) def execute(self): (rc,out,err) = self.module.run_command(self.build_command())",
"unicode_literals from __future__ import absolute_import from __future__ import generators from __future__ import division",
"None and params[opt].strip() != '': command.append('--%s=%s' % (opt.replace('_','-'), pipes.quote(params[opt]))) return ' '.join(command) def",
"pipes from ansible.module_utils.basic import * class Firebrew(object): STATUS_SUCCESS = 0 STATUS_FAILURE = 1",
"ansible.module_utils.basic import * class Firebrew(object): STATUS_SUCCESS = 0 STATUS_FAILURE = 1 STATUS_NOT_CHANGED =",
"Firebrew(object): STATUS_SUCCESS = 0 STATUS_FAILURE = 1 STATUS_NOT_CHANGED = 2 def __init__(self, AnsibleModule",
"' '.join(command) def execute(self): (rc,out,err) = self.module.run_command(self.build_command()) if rc == self.STATUS_SUCCESS: self.module.exit_json(changed=True) elif",
") def build_command(self): params = self.module.params command = [ self.module.get_bin_path('firebrew'), {'present': 'install', 'absent':",
"generators from __future__ import division import pipes from ansible.module_utils.basic import * class Firebrew(object):",
"= AnsibleModule( argument_spec = dict( state = dict(type='str', default='present', choices=['present', 'absent']), name =",
"= dict(type='str'), firefox = dict(type='str') ) ) def build_command(self): params = self.module.params command",
"] for opt in ['base_dir','profile','firefox']: if opt in params and params[opt] != None",
"'absent': 'uninstall'}[params['state']], pipes.quote(params['name']) ] for opt in ['base_dir','profile','firefox']: if opt in params and",
"state = dict(type='str', default='present', choices=['present', 'absent']), name = dict(type='str', required=True), base_dir = dict(type='str'),",
"import unicode_literals from __future__ import absolute_import from __future__ import generators from __future__ import",
"= [ self.module.get_bin_path('firebrew'), {'present': 'install', 'absent': 'uninstall'}[params['state']], pipes.quote(params['name']) ] for opt in ['base_dir','profile','firefox']:",
"rc == self.STATUS_NOT_CHANGED: self.module.exit_json(changed=False) else: self.module.fail_json(msg = err) if __name__ == '__main__': Firebrew().execute()",
"def __init__(self, AnsibleModule = AnsibleModule): self.module = AnsibleModule( argument_spec = dict( state =",
"in params and params[opt] != None and params[opt].strip() != '': command.append('--%s=%s' % (opt.replace('_','-'),",
"profile = dict(type='str'), firefox = dict(type='str') ) ) def build_command(self): params = self.module.params",
"division import pipes from ansible.module_utils.basic import * class Firebrew(object): STATUS_SUCCESS = 0 STATUS_FAILURE",
"#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__",
"params[opt] != None and params[opt].strip() != '': command.append('--%s=%s' % (opt.replace('_','-'), pipes.quote(params[opt]))) return '",
"= self.module.run_command(self.build_command()) if rc == self.STATUS_SUCCESS: self.module.exit_json(changed=True) elif rc == self.STATUS_NOT_CHANGED: self.module.exit_json(changed=False) else:",
"from __future__ import generators from __future__ import division import pipes from ansible.module_utils.basic import",
"'.join(command) def execute(self): (rc,out,err) = self.module.run_command(self.build_command()) if rc == self.STATUS_SUCCESS: self.module.exit_json(changed=True) elif rc",
"class Firebrew(object): STATUS_SUCCESS = 0 STATUS_FAILURE = 1 STATUS_NOT_CHANGED = 2 def __init__(self,",
"self.module = AnsibleModule( argument_spec = dict( state = dict(type='str', default='present', choices=['present', 'absent']), name",
"dict(type='str', required=True), base_dir = dict(type='str'), profile = dict(type='str'), firefox = dict(type='str') ) )",
"(opt.replace('_','-'), pipes.quote(params[opt]))) return ' '.join(command) def execute(self): (rc,out,err) = self.module.run_command(self.build_command()) if rc ==",
"from __future__ import division import pipes from ansible.module_utils.basic import * class Firebrew(object): STATUS_SUCCESS",
"% (opt.replace('_','-'), pipes.quote(params[opt]))) return ' '.join(command) def execute(self): (rc,out,err) = self.module.run_command(self.build_command()) if rc",
"1 STATUS_NOT_CHANGED = 2 def __init__(self, AnsibleModule = AnsibleModule): self.module = AnsibleModule( argument_spec",
"base_dir = dict(type='str'), profile = dict(type='str'), firefox = dict(type='str') ) ) def build_command(self):",
"== self.STATUS_SUCCESS: self.module.exit_json(changed=True) elif rc == self.STATUS_NOT_CHANGED: self.module.exit_json(changed=False) else: self.module.fail_json(msg = err) if",
"self.module.params command = [ self.module.get_bin_path('firebrew'), {'present': 'install', 'absent': 'uninstall'}[params['state']], pipes.quote(params['name']) ] for opt",
"AnsibleModule): self.module = AnsibleModule( argument_spec = dict( state = dict(type='str', default='present', choices=['present', 'absent']),",
"utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import from __future__ import",
"= dict(type='str') ) ) def build_command(self): params = self.module.params command = [ self.module.get_bin_path('firebrew'),",
"'absent']), name = dict(type='str', required=True), base_dir = dict(type='str'), profile = dict(type='str'), firefox =",
"required=True), base_dir = dict(type='str'), profile = dict(type='str'), firefox = dict(type='str') ) ) def",
"self.STATUS_SUCCESS: self.module.exit_json(changed=True) elif rc == self.STATUS_NOT_CHANGED: self.module.exit_json(changed=False) else: self.module.fail_json(msg = err) if __name__",
"= dict(type='str'), profile = dict(type='str'), firefox = dict(type='str') ) ) def build_command(self): params",
"__future__ import generators from __future__ import division import pipes from ansible.module_utils.basic import *",
"= AnsibleModule): self.module = AnsibleModule( argument_spec = dict( state = dict(type='str', default='present', choices=['present',",
"__future__ import absolute_import from __future__ import generators from __future__ import division import pipes",
"pipes.quote(params['name']) ] for opt in ['base_dir','profile','firefox']: if opt in params and params[opt] !=",
"import generators from __future__ import division import pipes from ansible.module_utils.basic import * class",
"argument_spec = dict( state = dict(type='str', default='present', choices=['present', 'absent']), name = dict(type='str', required=True),",
"STATUS_NOT_CHANGED = 2 def __init__(self, AnsibleModule = AnsibleModule): self.module = AnsibleModule( argument_spec =",
"dict( state = dict(type='str', default='present', choices=['present', 'absent']), name = dict(type='str', required=True), base_dir =",
"dict(type='str'), profile = dict(type='str'), firefox = dict(type='str') ) ) def build_command(self): params =",
"rc == self.STATUS_SUCCESS: self.module.exit_json(changed=True) elif rc == self.STATUS_NOT_CHANGED: self.module.exit_json(changed=False) else: self.module.fail_json(msg = err)",
"opt in ['base_dir','profile','firefox']: if opt in params and params[opt] != None and params[opt].strip()",
"0 STATUS_FAILURE = 1 STATUS_NOT_CHANGED = 2 def __init__(self, AnsibleModule = AnsibleModule): self.module",
"'install', 'absent': 'uninstall'}[params['state']], pipes.quote(params['name']) ] for opt in ['base_dir','profile','firefox']: if opt in params",
"and params[opt] != None and params[opt].strip() != '': command.append('--%s=%s' % (opt.replace('_','-'), pipes.quote(params[opt]))) return",
"choices=['present', 'absent']), name = dict(type='str', required=True), base_dir = dict(type='str'), profile = dict(type='str'), firefox",
"build_command(self): params = self.module.params command = [ self.module.get_bin_path('firebrew'), {'present': 'install', 'absent': 'uninstall'}[params['state']], pipes.quote(params['name'])",
"for opt in ['base_dir','profile','firefox']: if opt in params and params[opt] != None and",
"pipes.quote(params[opt]))) return ' '.join(command) def execute(self): (rc,out,err) = self.module.run_command(self.build_command()) if rc == self.STATUS_SUCCESS:",
"from __future__ import unicode_literals from __future__ import absolute_import from __future__ import generators from",
"-*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import from",
"!= '': command.append('--%s=%s' % (opt.replace('_','-'), pipes.quote(params[opt]))) return ' '.join(command) def execute(self): (rc,out,err) =",
"execute(self): (rc,out,err) = self.module.run_command(self.build_command()) if rc == self.STATUS_SUCCESS: self.module.exit_json(changed=True) elif rc == self.STATUS_NOT_CHANGED:",
"import absolute_import from __future__ import generators from __future__ import division import pipes from",
"import * class Firebrew(object): STATUS_SUCCESS = 0 STATUS_FAILURE = 1 STATUS_NOT_CHANGED = 2",
"params[opt].strip() != '': command.append('--%s=%s' % (opt.replace('_','-'), pipes.quote(params[opt]))) return ' '.join(command) def execute(self): (rc,out,err)",
"dict(type='str', default='present', choices=['present', 'absent']), name = dict(type='str', required=True), base_dir = dict(type='str'), profile =",
"params = self.module.params command = [ self.module.get_bin_path('firebrew'), {'present': 'install', 'absent': 'uninstall'}[params['state']], pipes.quote(params['name']) ]",
"firefox = dict(type='str') ) ) def build_command(self): params = self.module.params command = [",
"import pipes from ansible.module_utils.basic import * class Firebrew(object): STATUS_SUCCESS = 0 STATUS_FAILURE =",
"opt in params and params[opt] != None and params[opt].strip() != '': command.append('--%s=%s' %",
"and params[opt].strip() != '': command.append('--%s=%s' % (opt.replace('_','-'), pipes.quote(params[opt]))) return ' '.join(command) def execute(self):",
"self.module.exit_json(changed=True) elif rc == self.STATUS_NOT_CHANGED: self.module.exit_json(changed=False) else: self.module.fail_json(msg = err) if __name__ ==",
"absolute_import from __future__ import generators from __future__ import division import pipes from ansible.module_utils.basic",
"return ' '.join(command) def execute(self): (rc,out,err) = self.module.run_command(self.build_command()) if rc == self.STATUS_SUCCESS: self.module.exit_json(changed=True)",
"from ansible.module_utils.basic import * class Firebrew(object): STATUS_SUCCESS = 0 STATUS_FAILURE = 1 STATUS_NOT_CHANGED",
"[ self.module.get_bin_path('firebrew'), {'present': 'install', 'absent': 'uninstall'}[params['state']], pipes.quote(params['name']) ] for opt in ['base_dir','profile','firefox']: if",
"= 0 STATUS_FAILURE = 1 STATUS_NOT_CHANGED = 2 def __init__(self, AnsibleModule = AnsibleModule):",
"from __future__ import absolute_import from __future__ import generators from __future__ import division import",
"def execute(self): (rc,out,err) = self.module.run_command(self.build_command()) if rc == self.STATUS_SUCCESS: self.module.exit_json(changed=True) elif rc ==",
"elif rc == self.STATUS_NOT_CHANGED: self.module.exit_json(changed=False) else: self.module.fail_json(msg = err) if __name__ == '__main__':",
"!= None and params[opt].strip() != '': command.append('--%s=%s' % (opt.replace('_','-'), pipes.quote(params[opt]))) return ' '.join(command)",
"= dict( state = dict(type='str', default='present', choices=['present', 'absent']), name = dict(type='str', required=True), base_dir",
"params and params[opt] != None and params[opt].strip() != '': command.append('--%s=%s' % (opt.replace('_','-'), pipes.quote(params[opt])))",
"2 def __init__(self, AnsibleModule = AnsibleModule): self.module = AnsibleModule( argument_spec = dict( state",
"if opt in params and params[opt] != None and params[opt].strip() != '': command.append('--%s=%s'",
"__future__ import unicode_literals from __future__ import absolute_import from __future__ import generators from __future__",
"in ['base_dir','profile','firefox']: if opt in params and params[opt] != None and params[opt].strip() !=",
"self.module.get_bin_path('firebrew'), {'present': 'install', 'absent': 'uninstall'}[params['state']], pipes.quote(params['name']) ] for opt in ['base_dir','profile','firefox']: if opt",
"{'present': 'install', 'absent': 'uninstall'}[params['state']], pipes.quote(params['name']) ] for opt in ['base_dir','profile','firefox']: if opt in",
"(rc,out,err) = self.module.run_command(self.build_command()) if rc == self.STATUS_SUCCESS: self.module.exit_json(changed=True) elif rc == self.STATUS_NOT_CHANGED: self.module.exit_json(changed=False)",
") ) def build_command(self): params = self.module.params command = [ self.module.get_bin_path('firebrew'), {'present': 'install',",
"command = [ self.module.get_bin_path('firebrew'), {'present': 'install', 'absent': 'uninstall'}[params['state']], pipes.quote(params['name']) ] for opt in",
"command.append('--%s=%s' % (opt.replace('_','-'), pipes.quote(params[opt]))) return ' '.join(command) def execute(self): (rc,out,err) = self.module.run_command(self.build_command()) if",
"__future__ import division import pipes from ansible.module_utils.basic import * class Firebrew(object): STATUS_SUCCESS =",
"AnsibleModule( argument_spec = dict( state = dict(type='str', default='present', choices=['present', 'absent']), name = dict(type='str',",
"'uninstall'}[params['state']], pipes.quote(params['name']) ] for opt in ['base_dir','profile','firefox']: if opt in params and params[opt]",
"# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import",
"dict(type='str'), firefox = dict(type='str') ) ) def build_command(self): params = self.module.params command =",
"STATUS_SUCCESS = 0 STATUS_FAILURE = 1 STATUS_NOT_CHANGED = 2 def __init__(self, AnsibleModule ="
] |
[
"input().split())) def is_consecutive(a): return max(a) - min(a) + 1 == len(a) == len(frozenset(a))",
"len(a) == len(frozenset(a)) for _ in range(int(input())): input() # don't need n print('Yes'",
"for _ in range(int(input())): input() # don't need n print('Yes' if is_consecutive(read_record()) else",
"read_record(): return list(map(int, input().split())) def is_consecutive(a): return max(a) - min(a) + 1 ==",
"<filename>main/consecutive-array-elements/consecutive-array-elements.py #!/usr/bin/env python3 def read_record(): return list(map(int, input().split())) def is_consecutive(a): return max(a) -",
"+ 1 == len(a) == len(frozenset(a)) for _ in range(int(input())): input() # don't",
"- min(a) + 1 == len(a) == len(frozenset(a)) for _ in range(int(input())): input()",
"return list(map(int, input().split())) def is_consecutive(a): return max(a) - min(a) + 1 == len(a)",
"list(map(int, input().split())) def is_consecutive(a): return max(a) - min(a) + 1 == len(a) ==",
"min(a) + 1 == len(a) == len(frozenset(a)) for _ in range(int(input())): input() #",
"is_consecutive(a): return max(a) - min(a) + 1 == len(a) == len(frozenset(a)) for _",
"def is_consecutive(a): return max(a) - min(a) + 1 == len(a) == len(frozenset(a)) for",
"def read_record(): return list(map(int, input().split())) def is_consecutive(a): return max(a) - min(a) + 1",
"python3 def read_record(): return list(map(int, input().split())) def is_consecutive(a): return max(a) - min(a) +",
"== len(a) == len(frozenset(a)) for _ in range(int(input())): input() # don't need n",
"== len(frozenset(a)) for _ in range(int(input())): input() # don't need n print('Yes' if",
"max(a) - min(a) + 1 == len(a) == len(frozenset(a)) for _ in range(int(input())):",
"1 == len(a) == len(frozenset(a)) for _ in range(int(input())): input() # don't need",
"len(frozenset(a)) for _ in range(int(input())): input() # don't need n print('Yes' if is_consecutive(read_record())",
"_ in range(int(input())): input() # don't need n print('Yes' if is_consecutive(read_record()) else 'No')",
"return max(a) - min(a) + 1 == len(a) == len(frozenset(a)) for _ in",
"#!/usr/bin/env python3 def read_record(): return list(map(int, input().split())) def is_consecutive(a): return max(a) - min(a)"
] |
[
"[['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] lkp = etl.lookup(table1, 'foo', 'bar')",
"lkp = etl.lookup(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] #",
"the # shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookup(table1,",
"['b', 3, True], ['b', 3, False]] lkp = etl.dictlookupone(table2, ('foo', 'bar')) lkp[('a', 1)]",
"3, True], ['b', 3, False]] lkp = etl.lookup(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)]",
"first value wins lkp = etl.dictlookupone(table1, 'foo') lkp['a'] lkp['b'] # if the specified",
"1], ['b', 2], ['b', 3]] lkp = etl.dictlookup(table1, 'foo') lkp['a'] lkp['b'] # compound",
"'bar') lkp['a'] lkp['b'] # if the specified key is not unique and strict=True,",
"'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # lookupone() #############",
"[['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] # if the specified key",
"etl.errors.DuplicateKeyError as e: print(e) # compound keys are supported table2 = [['foo', 'bar',",
"= [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] lkp = etl.lookup(table1, 'foo',",
"'foo', 'bar') lkp['a'] lkp['b'] # if the specified key is not unique and",
"lkp[('b', 2)] lkp[('b', 3)] # data can be loaded into an existing dictionary-like",
"2, False], ['b', 3, True], ['b', 3, False]] lkp = etl.dictlookupone(table2, ('foo', 'bar'))",
"unique and strict=False (default), # the first value wins lkp = etl.lookupone(table1, 'foo',",
"lkp = etl.lookupone(table1, 'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b']",
"['b', 3]] lkp = etl.dictlookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys are supported",
"supported table2 = [['foo', 'bar', 'baz'], ['a', 1, True], ['b', 2, False], ['b',",
"3]] lkp = etl.lookup(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if no valuespec argument",
"wins lkp = etl.lookupone(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if the specified key",
"lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookupone(table1, 'foo', lkp) lkp.close() lkp = shelve.open('example.dat',",
"is not unique and strict=False (default), # the first value wins lkp =",
"['b', 3, True], ['b', 3, False]] lkp = etl.dictlookup(table2, ('foo', 'bar')) lkp[('a', 1)]",
"'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data can be loaded into",
"table1 = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] lkp = etl.dictlookup(table1,",
"################# import petl as etl table1 = [['foo', 'bar'], ['a', 1], ['b', 2],",
"lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # lookupone() ############# import petl",
"'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data can be loaded into",
"the whole # row (as a tuple) lkp = etl.lookup(table1, 'foo') lkp['a'] lkp['b']",
"lkp['a'] lkp['b'] # if the specified key is not unique and strict=True, will",
"shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookupone(table1, 'foo', lkp)",
"lookupone() ############# import petl as etl table1 = [['foo', 'bar'], ['a', 1], ['b',",
"DuplicateKeyError try: lkp = etl.lookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e) #",
"lkp['b'] # if no valuespec argument is given, defaults to the whole #",
"shelve.open('example.dat', flag='n') lkp = etl.lookup(table1, 'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r')",
"True], ['b', 2, False], ['b', 3, True], ['b', 3, False]] lkp = etl.lookupone(table2,",
"'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # lookupone() ############# import",
"['b', 3, False]] lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)]",
"1], ['b', 2], ['b', 3]] # if the specified key is not unique",
"the specified key is not unique and strict=False (default), # the first value",
"lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # lookupone() ############# import petl as",
"('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data can be",
"unique and strict=False (default), # the first value wins lkp = etl.dictlookupone(table1, 'foo')",
"and strict=True, will raise # DuplicateKeyError try: lkp = etl.dictlookupone(table1, 'foo', strict=True) except",
"lkp = etl.dictlookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e) # compound keys",
"DuplicateKeyError try: lkp = etl.dictlookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e) #",
"lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] #",
"etl.dictlookupone(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data can be",
"strict=False (default), # the first value wins lkp = etl.lookupone(table1, 'foo', 'bar') lkp['a']",
"(default), # the first value wins lkp = etl.dictlookupone(table1, 'foo') lkp['a'] lkp['b'] #",
"lkp = etl.lookup(table1, 'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b']",
"shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookupone(table1, 'foo', lkp) lkp.close() lkp =",
"# if no valuespec argument is given, defaults to the whole # row",
"lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookupone() ################# import petl as",
"lkp['a'] lkp['b'] # dictlookup() ############## import petl as etl table1 = [['foo', 'bar'],",
"flag='n') lkp = etl.lookup(table1, 'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a']",
"True], ['b', 2, False], ['b', 3, True], ['b', 3, False]] lkp = etl.lookup(table2,",
"= [['foo', 'bar', 'baz'], ['a', 1, True], ['b', 2, False], ['b', 3, True],",
"False], ['b', 3, True], ['b', 3, False]] lkp = etl.dictlookupone(table2, ('foo', 'bar')) lkp[('a',",
"compound keys are supported table2 = [['foo', 'bar', 'baz'], ['a', 1, True], ['b',",
"strict=True, will raise # DuplicateKeyError try: lkp = etl.dictlookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError",
"created via the # shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp",
"the first value wins lkp = etl.lookupone(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if",
"['b', 2, False], ['b', 3, True], ['b', 3, False]] lkp = etl.dictlookupone(table2, ('foo',",
"['b', 2, False], ['b', 3, True], ['b', 3, False]] lkp = etl.lookupone(table2, ('foo',",
"the # shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookupone(table1,",
"[['foo', 'bar', 'baz'], ['a', 1, True], ['b', 2, False], ['b', 3, True], ['b',",
"2], ['b', 3]] lkp = etl.dictlookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys are",
"= etl.dictlookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys are supported table2 = [['foo',",
"lkp = etl.dictlookup(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data",
"= etl.dictlookupone(table1, 'foo') lkp['a'] lkp['b'] # if the specified key is not unique",
"3, True], ['b', 3, False]] lkp = etl.dictlookupone(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b',",
"# lookup() ########## import petl as etl table1 = [['foo', 'bar'], ['a', 1],",
"raise # DuplicateKeyError try: lkp = etl.dictlookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e:",
"shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookup(table1, 'foo', lkp)",
"specified key is not unique and strict=False (default), # the first value wins",
"valuespec argument is given, defaults to the whole # row (as a tuple)",
"e: print(e) # compound keys are supported table2 = [['foo', 'bar', 'baz'], ['a',",
"to the whole # row (as a tuple) lkp = etl.lookup(table1, 'foo') lkp['a']",
"= etl.lookup(table1, 'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] #",
"unique and strict=True, will raise # DuplicateKeyError try: lkp = etl.dictlookupone(table1, 'foo', strict=True)",
"dictionaries created via the # shelve module import shelve lkp = shelve.open('example.dat', flag='n')",
"raise # DuplicateKeyError try: lkp = etl.lookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e:",
"['b', 3, False]] lkp = etl.dictlookup(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b',",
"not unique and strict=True, will raise # DuplicateKeyError try: lkp = etl.dictlookupone(table1, 'foo',",
"module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.lookup(table1, 'foo', 'bar', lkp)",
"3]] # if the specified key is not unique and strict=False (default), #",
"persistent dictionaries created via the # shelve module import shelve lkp = shelve.open('example.dat',",
"module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookupone(table1, 'foo', lkp) lkp.close()",
"lkp = etl.dictlookupone(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data",
"= etl.lookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys are supported table2 = [['foo',",
"3, True], ['b', 3, False]] lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)]",
"the specified key is not unique and strict=True, will raise # DuplicateKeyError try:",
"3, True], ['b', 3, False]] lkp = etl.dictlookup(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b',",
"['a', 1], ['b', 2], ['b', 3]] lkp = etl.dictlookup(table1, 'foo') lkp['a'] lkp['b'] #",
"['a', 1, True], ['b', 2, False], ['b', 3, True], ['b', 3, False]] lkp",
"'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookup() ############## import",
"3]] lkp = etl.dictlookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys are supported table2",
"lkp['a'] lkp['b'] # lookupone() ############# import petl as etl table1 = [['foo', 'bar'],",
"False]] lkp = etl.lookup(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)]",
"'bar'], ['a', 1], ['b', 2], ['b', 3]] # if the specified key is",
"False], ['b', 3, True], ['b', 3, False]] lkp = etl.dictlookup(table2, ('foo', 'bar')) lkp[('a',",
"lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookup() ############## import petl as etl",
"lkp['b'] # compound keys are supported table2 = [['foo', 'bar', 'baz'], ['a', 1,",
"# if the specified key is not unique and strict=False (default), # the",
"defaults to the whole # row (as a tuple) lkp = etl.lookup(table1, 'foo')",
"import division, print_function, absolute_import # lookup() ########## import petl as etl table1 =",
"[['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] lkp = etl.dictlookup(table1, 'foo') lkp['a']",
"= etl.dictlookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e) # compound keys are",
"True], ['b', 3, False]] lkp = etl.dictlookup(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)]",
"# dictlookupone() ################# import petl as etl table1 = [['foo', 'bar'], ['a', 1],",
"lkp['b'] # dictlookup() ############## import petl as etl table1 = [['foo', 'bar'], ['a',",
"if no valuespec argument is given, defaults to the whole # row (as",
"lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # lookupone() ############# import petl as etl",
"object, including persistent dictionaries created via the # shelve module import shelve lkp",
"'baz'], ['a', 1, True], ['b', 2, False], ['b', 3, True], ['b', 3, False]]",
"argument is given, defaults to the whole # row (as a tuple) lkp",
"False]] lkp = etl.dictlookupone(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] #",
"will raise # DuplicateKeyError try: lkp = etl.dictlookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as",
"lkp['a'] lkp['b'] # if no valuespec argument is given, defaults to the whole",
"including persistent dictionaries created via the # shelve module import shelve lkp =",
"########## import petl as etl table1 = [['foo', 'bar'], ['a', 1], ['b', 2],",
"etl.lookupone(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data can",
"'foo', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookupone() ################# import",
"['b', 2], ['b', 3]] # if the specified key is not unique and",
"lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookupone() ################# import petl as etl",
"False], ['b', 3, True], ['b', 3, False]] lkp = etl.lookup(table2, ('foo', 'bar'), 'baz')",
"'bar') lkp['a'] lkp['b'] # if no valuespec argument is given, defaults to the",
"1)] lkp[('b', 2)] lkp[('b', 3)] # data can be loaded into an existing",
"table1 = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] # if the",
"lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookup() ############## import petl as",
"['b', 3, False]] lkp = etl.lookup(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)]",
"not unique and strict=False (default), # the first value wins lkp = etl.lookupone(table1,",
"lkp = etl.lookupone(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if the specified key is",
"and strict=False (default), # the first value wins lkp = etl.dictlookupone(table1, 'foo') lkp['a']",
"as etl table1 = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] #",
"is given, defaults to the whole # row (as a tuple) lkp =",
"shelve.open('example.dat', flag='n') lkp = etl.dictlookup(table1, 'foo', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a']",
"False]] lkp = etl.dictlookup(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] #",
"lkp = shelve.open('example.dat', flag='n') lkp = etl.lookupone(table1, 'foo', 'bar', lkp) lkp.close() lkp =",
"# DuplicateKeyError try: lkp = etl.lookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e)",
"['b', 2, False], ['b', 3, True], ['b', 3, False]] lkp = etl.lookup(table2, ('foo',",
"etl.lookup(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data can",
"lkp['a'] lkp['b'] # dictlookupone() ################# import petl as etl table1 = [['foo', 'bar'],",
"lkp = etl.lookup(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if no valuespec argument is",
"# the first value wins lkp = etl.dictlookupone(table1, 'foo') lkp['a'] lkp['b'] # if",
"['b', 2], ['b', 3]] lkp = etl.dictlookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys",
"lkp = etl.dictlookupone(table1, 'foo') lkp['a'] lkp['b'] # if the specified key is not",
"'bar', 'baz'], ['a', 1, True], ['b', 2, False], ['b', 3, True], ['b', 3,",
"module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookup(table1, 'foo', lkp) lkp.close()",
"= shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookupone() ################# import petl as etl table1",
"into an existing dictionary-like # object, including persistent dictionaries created via the #",
"wins lkp = etl.dictlookupone(table1, 'foo') lkp['a'] lkp['b'] # if the specified key is",
"# shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookup(table1, 'foo',",
"= etl.dictlookup(table1, 'foo', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookupone()",
"# if the specified key is not unique and strict=True, will raise #",
"= etl.lookup(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data",
"table1 = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] lkp = etl.lookup(table1,",
"strict=True, will raise # DuplicateKeyError try: lkp = etl.lookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError",
"the # shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.lookupone(table1,",
"= etl.lookup(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if no valuespec argument is given,",
"2, False], ['b', 3, True], ['b', 3, False]] lkp = etl.dictlookup(table2, ('foo', 'bar'))",
"2], ['b', 3]] lkp = etl.lookup(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if no",
"= shelve.open('example.dat', flag='n') lkp = etl.lookup(table1, 'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat',",
"lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data can be loaded into an",
"shelve.open('example.dat', flag='n') lkp = etl.lookupone(table1, 'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r')",
"can be loaded into an existing dictionary-like # object, including persistent dictionaries created",
"via the # shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp =",
"['b', 3, True], ['b', 3, False]] lkp = etl.lookup(table2, ('foo', 'bar'), 'baz') lkp[('a',",
"the # shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.lookup(table1,",
"'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookup() ##############",
"= [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] lkp = etl.dictlookup(table1, 'foo')",
"etl.lookupone(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if the specified key is not unique",
"lkp = etl.dictlookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys are supported table2 =",
"an existing dictionary-like # object, including persistent dictionaries created via the # shelve",
"3, False]] lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b',",
"'foo') lkp['a'] lkp['b'] # if the specified key is not unique and strict=True,",
"= shelve.open('example.dat', flag='n') lkp = etl.dictlookup(table1, 'foo', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r')",
"the first value wins lkp = etl.dictlookupone(table1, 'foo') lkp['a'] lkp['b'] # if the",
"# compound keys are supported table2 = [['foo', 'bar', 'baz'], ['a', 1, True],",
"############## import petl as etl table1 = [['foo', 'bar'], ['a', 1], ['b', 2],",
"False], ['b', 3, True], ['b', 3, False]] lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz')",
"import petl as etl table1 = [['foo', 'bar'], ['a', 1], ['b', 2], ['b',",
"key is not unique and strict=True, will raise # DuplicateKeyError try: lkp =",
"strict=False (default), # the first value wins lkp = etl.dictlookupone(table1, 'foo') lkp['a'] lkp['b']",
"key is not unique and strict=False (default), # the first value wins lkp",
"= shelve.open('example.dat', flag='n') lkp = etl.lookupone(table1, 'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat',",
"= etl.dictlookupone(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data can",
"lkp['b'] # dictlookupone() ################# import petl as etl table1 = [['foo', 'bar'], ['a',",
"# the first value wins lkp = etl.lookupone(table1, 'foo', 'bar') lkp['a'] lkp['b'] #",
"# lookupone() ############# import petl as etl table1 = [['foo', 'bar'], ['a', 1],",
"value wins lkp = etl.dictlookupone(table1, 'foo') lkp['a'] lkp['b'] # if the specified key",
"'foo') lkp['a'] lkp['b'] # compound keys are supported table2 = [['foo', 'bar', 'baz'],",
"= shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookup() ############## import petl as etl table1",
"division, print_function, absolute_import # lookup() ########## import petl as etl table1 = [['foo',",
"except etl.errors.DuplicateKeyError as e: print(e) # compound keys are supported table2 = [['foo',",
"(as a tuple) lkp = etl.lookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys are",
"and strict=True, will raise # DuplicateKeyError try: lkp = etl.lookupone(table1, 'foo', strict=True) except",
"lkp = etl.lookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e) # compound keys",
"shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.lookup(table1, 'foo', 'bar', lkp) lkp.close() lkp",
"etl.lookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e) # compound keys are supported",
"etl.lookupone(table1, 'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookup()",
"are supported table2 = [['foo', 'bar', 'baz'], ['a', 1, True], ['b', 2, False],",
"('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data can be loaded",
"shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.lookup(table1, 'foo', 'bar',",
"import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookupone(table1, 'foo', lkp) lkp.close() lkp",
"# row (as a tuple) lkp = etl.lookup(table1, 'foo') lkp['a'] lkp['b'] # compound",
"etl table1 = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] # if",
"shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.lookupone(table1, 'foo', 'bar', lkp) lkp.close() lkp",
"True], ['b', 2, False], ['b', 3, True], ['b', 3, False]] lkp = etl.dictlookupone(table2,",
"etl.dictlookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys are supported table2 = [['foo', 'bar',",
"2, False], ['b', 3, True], ['b', 3, False]] lkp = etl.lookupone(table2, ('foo', 'bar'),",
"if the specified key is not unique and strict=False (default), # the first",
"2], ['b', 3]] # if the specified key is not unique and strict=False",
"not unique and strict=True, will raise # DuplicateKeyError try: lkp = etl.lookupone(table1, 'foo',",
"shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookup() ############## import petl as etl table1 =",
"flag='r') lkp['a'] lkp['b'] # dictlookupone() ################# import petl as etl table1 = [['foo',",
"= etl.lookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e) # compound keys are",
"as e: print(e) # compound keys are supported table2 = [['foo', 'bar', 'baz'],",
"dictlookup() ############## import petl as etl table1 = [['foo', 'bar'], ['a', 1], ['b',",
"lkp = shelve.open('example.dat', flag='n') lkp = etl.lookup(table1, 'foo', 'bar', lkp) lkp.close() lkp =",
"dictlookupone() ################# import petl as etl table1 = [['foo', 'bar'], ['a', 1], ['b',",
"lkp['b'] # lookupone() ############# import petl as etl table1 = [['foo', 'bar'], ['a',",
"flag='r') lkp['a'] lkp['b'] # lookupone() ############# import petl as etl table1 = [['foo',",
"keys are supported table2 = [['foo', 'bar', 'baz'], ['a', 1, True], ['b', 2,",
"flag='r') lkp['a'] lkp['b'] # dictlookup() ############## import petl as etl table1 = [['foo',",
"lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookup() ############## import petl",
"etl table1 = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] lkp =",
"['b', 3]] # if the specified key is not unique and strict=False (default),",
"petl as etl table1 = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]]",
"3, False]] lkp = etl.dictlookup(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)]",
"etl.lookup(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if no valuespec argument is given, defaults",
"3)] # data can be loaded into an existing dictionary-like # object, including",
"value wins lkp = etl.lookupone(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if the specified",
"['b', 3, False]] lkp = etl.dictlookupone(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b',",
"['b', 3]] lkp = etl.lookup(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if no valuespec",
"['a', 1], ['b', 2], ['b', 3]] # if the specified key is not",
"'bar'], ['a', 1], ['b', 2], ['b', 3]] lkp = etl.dictlookup(table1, 'foo') lkp['a'] lkp['b']",
"existing dictionary-like # object, including persistent dictionaries created via the # shelve module",
"if the specified key is not unique and strict=True, will raise # DuplicateKeyError",
"1, True], ['b', 2, False], ['b', 3, True], ['b', 3, False]] lkp =",
"first value wins lkp = etl.lookupone(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if the",
"'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data can be loaded",
"lkp = etl.dictlookup(table1, 'foo', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] #",
"unique and strict=True, will raise # DuplicateKeyError try: lkp = etl.lookupone(table1, 'foo', strict=True)",
"and strict=False (default), # the first value wins lkp = etl.lookupone(table1, 'foo', 'bar')",
"etl.dictlookup(table1, 'foo', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookupone() #################",
"= etl.lookupone(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if the specified key is not",
"'bar'], ['a', 1], ['b', 2], ['b', 3]] lkp = etl.lookup(table1, 'foo', 'bar') lkp['a']",
"= etl.lookupone(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data",
"specified key is not unique and strict=True, will raise # DuplicateKeyError try: lkp",
"shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # lookupone() ############# import petl as etl table1 =",
"['a', 1], ['b', 2], ['b', 3]] lkp = etl.lookup(table1, 'foo', 'bar') lkp['a'] lkp['b']",
"shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.lookupone(table1, 'foo', 'bar',",
"# dictlookup() ############## import petl as etl table1 = [['foo', 'bar'], ['a', 1],",
"3, False]] lkp = etl.dictlookupone(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)]",
"be loaded into an existing dictionary-like # object, including persistent dictionaries created via",
"will raise # DuplicateKeyError try: lkp = etl.lookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as",
"= etl.lookupone(table1, 'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] #",
"True], ['b', 3, False]] lkp = etl.dictlookupone(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)]",
"2, False], ['b', 3, True], ['b', 3, False]] lkp = etl.lookup(table2, ('foo', 'bar'),",
"print(e) # compound keys are supported table2 = [['foo', 'bar', 'baz'], ['a', 1,",
"tuple) lkp = etl.lookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys are supported table2",
"etl.dictlookup(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data can be",
"try: lkp = etl.dictlookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e) # compound",
"from __future__ import division, print_function, absolute_import # lookup() ########## import petl as etl",
"3, False]] lkp = etl.lookup(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b',",
"is not unique and strict=True, will raise # DuplicateKeyError try: lkp = etl.lookupone(table1,",
"lkp['b'] # if the specified key is not unique and strict=True, will raise",
"is not unique and strict=True, will raise # DuplicateKeyError try: lkp = etl.dictlookupone(table1,",
"shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookup(table1, 'foo', lkp) lkp.close() lkp =",
"############# import petl as etl table1 = [['foo', 'bar'], ['a', 1], ['b', 2],",
"as etl table1 = [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] lkp",
"# DuplicateKeyError try: lkp = etl.dictlookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e)",
"a tuple) lkp = etl.lookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys are supported",
"# object, including persistent dictionaries created via the # shelve module import shelve",
"lkp[('b', 3)] # data can be loaded into an existing dictionary-like # object,",
"data can be loaded into an existing dictionary-like # object, including persistent dictionaries",
"shelve.open('example.dat', flag='n') lkp = etl.dictlookupone(table1, 'foo', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a']",
"['b', 2], ['b', 3]] lkp = etl.lookup(table1, 'foo', 'bar') lkp['a'] lkp['b'] # if",
"['b', 3, True], ['b', 3, False]] lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz') lkp[('a',",
"print_function, absolute_import # lookup() ########## import petl as etl table1 = [['foo', 'bar'],",
"= shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # lookupone() ############# import petl as etl table1",
"# shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.lookupone(table1, 'foo',",
"lkp['a'] lkp['b'] # compound keys are supported table2 = [['foo', 'bar', 'baz'], ['a',",
"loaded into an existing dictionary-like # object, including persistent dictionaries created via the",
"lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookupone() ################# import petl",
"import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookup(table1, 'foo', lkp) lkp.close() lkp",
"True], ['b', 2, False], ['b', 3, True], ['b', 3, False]] lkp = etl.dictlookup(table2,",
"flag='n') lkp = etl.dictlookupone(table1, 'foo', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b']",
"whole # row (as a tuple) lkp = etl.lookup(table1, 'foo') lkp['a'] lkp['b'] #",
"table2 = [['foo', 'bar', 'baz'], ['a', 1, True], ['b', 2, False], ['b', 3,",
"lookup() ########## import petl as etl table1 = [['foo', 'bar'], ['a', 1], ['b',",
"False]] lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)]",
"shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # dictlookupone() ################# import petl as etl table1 =",
"= [['foo', 'bar'], ['a', 1], ['b', 2], ['b', 3]] # if the specified",
"# shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookupone(table1, 'foo',",
"etl.lookup(table1, 'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b'] # lookupone()",
"flag='n') lkp = etl.lookupone(table1, 'foo', 'bar', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a']",
"lkp = shelve.open('example.dat', flag='n') lkp = etl.dictlookup(table1, 'foo', lkp) lkp.close() lkp = shelve.open('example.dat',",
"etl.dictlookupone(table1, 'foo') lkp['a'] lkp['b'] # if the specified key is not unique and",
"['b', 2, False], ['b', 3, True], ['b', 3, False]] lkp = etl.dictlookup(table2, ('foo',",
"given, defaults to the whole # row (as a tuple) lkp = etl.lookup(table1,",
"etl.lookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys are supported table2 = [['foo', 'bar',",
"__future__ import division, print_function, absolute_import # lookup() ########## import petl as etl table1",
"try: lkp = etl.lookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e) # compound",
"dictionary-like # object, including persistent dictionaries created via the # shelve module import",
"= etl.dictlookup(table2, ('foo', 'bar')) lkp[('a', 1)] lkp[('b', 2)] lkp[('b', 3)] # data can",
"'foo', 'bar') lkp['a'] lkp['b'] # if no valuespec argument is given, defaults to",
"import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.lookup(table1, 'foo', 'bar', lkp) lkp.close()",
"# data can be loaded into an existing dictionary-like # object, including persistent",
"no valuespec argument is given, defaults to the whole # row (as a",
"import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.lookupone(table1, 'foo', 'bar', lkp) lkp.close()",
"# shelve module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.lookup(table1, 'foo',",
"absolute_import # lookup() ########## import petl as etl table1 = [['foo', 'bar'], ['a',",
"module import shelve lkp = shelve.open('example.dat', flag='n') lkp = etl.lookupone(table1, 'foo', 'bar', lkp)",
"= shelve.open('example.dat', flag='n') lkp = etl.dictlookupone(table1, 'foo', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r')",
"flag='n') lkp = etl.dictlookup(table1, 'foo', lkp) lkp.close() lkp = shelve.open('example.dat', flag='r') lkp['a'] lkp['b']",
"(default), # the first value wins lkp = etl.lookupone(table1, 'foo', 'bar') lkp['a'] lkp['b']",
"lkp = etl.lookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys are supported table2 =",
"True], ['b', 3, False]] lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b',",
"not unique and strict=False (default), # the first value wins lkp = etl.dictlookupone(table1,",
"True], ['b', 3, False]] lkp = etl.lookup(table2, ('foo', 'bar'), 'baz') lkp[('a', 1)] lkp[('b',",
"2)] lkp[('b', 3)] # data can be loaded into an existing dictionary-like #",
"strict=True) except etl.errors.DuplicateKeyError as e: print(e) # compound keys are supported table2 =",
"1], ['b', 2], ['b', 3]] lkp = etl.lookup(table1, 'foo', 'bar') lkp['a'] lkp['b'] #",
"'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e) # compound keys are supported table2",
"etl.dictlookupone(table1, 'foo', strict=True) except etl.errors.DuplicateKeyError as e: print(e) # compound keys are supported",
"row (as a tuple) lkp = etl.lookup(table1, 'foo') lkp['a'] lkp['b'] # compound keys"
] |
[
"(Rmin + Rmax)) alpha = np.arccos( (Rmax[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmin[index_partialwake]**2) /(2.0*Rmax[index_partialwake]*d[index_partialwake]) )",
"dist_cross) self.M_ijl[index_down, index_up, l_wd] = ( (A_ol/self.Ar_list[index_down])**2 / (1 + self.alpha*dist_down/self.R_list[index_up])**4) # calculate",
"A_il, k_il, f_il = self.getAkf(x_il, y_il, wd_il) for k_ws in range(self.num_ws_bin): self.pdf_ikl[:, k_ws,",
"between two centers [m] Returns ------- A_ol: array:float Overlapping area [m^2] \"\"\" #",
"downwind_order[i_up+1:] dist_down = x_rotated[index_down] - x_rotated[index_up] dist_cross = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 +",
"alpha*Rmax[index_partialwake]**2 + beta*Rmin[index_partialwake]**2 - 2.0*A_triangle ) return A_ol def cal_pdf_Weibull(self, v, A, k):",
"rotate_angle = (270 - self.wd_binned[l_wd])*np.pi/180.0 cos_rotate = np.cos(rotate_angle) sin_rotate = np.sin(rotate_angle) x_rotated =",
"def change_layout(self, complete_layout_new): \"\"\" Assume only locations of turbines changed, and number, hub-height",
"self.num_wd_bin)) self.pdf_ikl = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) def wind_shear_log(self, H, H_ref): return np.log(H/self.z0)/np.log(H_ref/self.z0)",
"np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) ####################################################################### # 3. calculate real wind speed # calculate M_ijl",
"with radius R1 and R2, centers distanced d. The calculation formula can be",
"index_down = downwind_order[i_up+1:] dist_down = x_rotated[index_down] - x_rotated[index_up] dist_cross = np.sqrt( (y_rotated[index_down] -",
"v, A, k): return ((k / A) * (v / A) ** (k",
"k_ws in range(self.num_ws_bin): self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il",
"= np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) dist_cross = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) R_wake = np.zeros((self.num_wt, self.num_wt,",
"(A1) of : [Ref] <NAME>, <NAME>, Solving the wind farm layout optimization problem",
"Rmax[index_partialwake]**2) /(2.0*Rmin[index_partialwake]*d[index_partialwake]) ) A_triangle = np.sqrt( p[index_partialwake]* (p[index_partialwake]-Rmin[index_partialwake])* (p[index_partialwake]-Rmax[index_partialwake])* (p[index_partialwake]-d[index_partialwake]) ) A_ol[index_partialwake] =",
"self.num_wd_bin)), axis=0) for i_wt in range(self.num_wt)], axis=0) A_ol = self.cal_overlapping_area(R_wake, R, dist_cross) self.M_ijl",
"range(self.num_wt)], axis=0) A_ol = self.cal_overlapping_area(R_wake, R, dist_cross) self.M_ijl = np.where(dist_down>0, (A_ol/(np.pi*R**2))**2 / (1",
"in range(self.num_ws_bin): self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il #######################################################################",
"of the second circle [m] d: array:float Distance between two centers [m] Returns",
"= ( self.alpha*dist_down[index_down, index_up, l_wd] + self.R_list[index_up]) R = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)),",
"alpha self.ws_binned = ws_binned self.wd_binned = wd_binned self.z0=z0 self.wt_types = wf_design.wt_types self.wind_shear_multi =",
"self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il dist_down = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) dist_cross = np.zeros((self.num_wt,",
"axis=0) for l_wt in range(self.num_wt)], axis=0) A_il, k_il, f_il = self.getAkf(x_il, y_il, wd_il)",
"np.sqrt( p[index_partialwake]* (p[index_partialwake]-Rmin[index_partialwake])* (p[index_partialwake]-Rmax[index_partialwake])* (p[index_partialwake]-d[index_partialwake]) ) A_ol[index_partialwake] = ( alpha*Rmax[index_partialwake]**2 + beta*Rmin[index_partialwake]**2 -",
"A, k): return ((k / A) * (v / A) ** (k -",
"self.num_ws_bin = len(ws_binned) self.num_wd_bin = len(wd_binned) self.ws_bin_size = ws_binned[1] - ws_binned[0] self.wd_bin_size =",
"np.arccos( (Rmin[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmax[index_partialwake]**2) /(2.0*Rmin[index_partialwake]*d[index_partialwake]) ) A_triangle = np.sqrt( p[index_partialwake]* (p[index_partialwake]-Rmin[index_partialwake])*",
":]) N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real = self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl))",
"circle [m] d: array:float Distance between two centers [m] Returns ------- A_ol: array:float",
"------- A_ol: array:float Overlapping area [m^2] \"\"\" # treat all input as array",
"2] - self.complete_layout[index_up, 2])**2) R_wake = self.alpha*dist_down + self.R_list[index_up] A_ol = self.cal_overlapping_area(R_wake, self.R_list[index_down],",
"range(self.num_wd_bin): rotate_angle = (270 - self.wd_binned[l_wd])*np.pi/180.0 cos_rotate = np.cos(rotate_angle) sin_rotate = np.sin(rotate_angle) x_rotated",
"Distance between two centers [m] Returns ------- A_ol: array:float Overlapping area [m^2] \"\"\"",
"i_up in range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down = x_rotated[index_down] -",
"self.cal_overlapping_area(R_wake, R, dist_cross) self.M_ijl = np.where(dist_down>0, (A_ol/(np.pi*R**2))**2 / (1 + self.alpha*dist_down/R1)**4, 0) #",
"(self.complete_layout[:, 0]*cos_rotate + self.complete_layout[:, 1]*sin_rotate) y_rotated = (self.complete_layout[:, 1]*cos_rotate - self.complete_layout[:, 0]*sin_rotate) downwind_order",
"M_ijl matrix for l_wd in range(self.num_wd_bin): rotate_angle = (270 - self.wd_binned[l_wd])*np.pi/180.0 cos_rotate =",
"i_up in range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down[index_down, index_up, l_wd] =",
"(p[index_partialwake]-d[index_partialwake]) ) A_ol[index_partialwake] = ( alpha*Rmax[index_partialwake]**2 + beta*Rmin[index_partialwake]**2 - 2.0*A_triangle ) return A_ol",
"(self.num_wt, self.num_wd_bin)), axis=1) for i_wt in range(self.num_wt)], axis=1) R1 = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt,",
"self.num_ws_bin, self.num_wd_bin)) self.v_ikl_real = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.pdf_ikl = np.zeros( (self.num_wt, self.num_ws_bin,",
"for m_type in set(self.type_list): index_cal = self.type_list == m_type N_jk[index_cal, :] = self.wt_types[m_type].get_Ct(",
"1], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) wd_il = np.concatenate([np.expand_dims(self.wd_binned, axis=0) for l_wt",
"Equation (A1), '2' before alpha and beta should be 1. Parameters ---------- R1:",
"2])**2) R_wake = self.alpha*dist_down + self.R_list[index_up] A_ol = self.cal_overlapping_area(R_wake, self.R_list[index_down], dist_cross) self.M_ijl[index_down, index_up,",
"= np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) def wind_shear_log(self, H, H_ref): return np.log(H/self.z0)/np.log(H_ref/self.z0) def change_layout(self,",
"as np class FlowField(object): def __init__(self, wf_design, getAkf, height_ref, alpha=0.04, ws_binned=np.linspace(1, 30, 30),",
"self.complete_layout = complete_layout_new def cal_flow_field(self): ###################################################################### # 1. calculate ideal wind speed v_ik",
"ws_binned[1] - ws_binned[0] self.wd_bin_size = wd_binned[1] - wd_binned[0] self.R_list = wf_design.D_list/2 self.Ar_list =",
"self.complete_layout[index_up, 2])**2) R_wake[index_down, index_up, l_wd] = ( self.alpha*dist_down[index_down, index_up, l_wd] + self.R_list[index_up]) R",
"H, H_ref): return np.log(H/self.z0)/np.log(H_ref/self.z0) def change_layout(self, complete_layout_new): \"\"\" Assume only locations of turbines",
"dtype='int') self.M_ijl = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) self.v_ikl_ideal = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.v_ikl_real",
"wf_design.Ar_list self.type_list = np.array([int(t) for t in self.complete_layout[:, 3]], dtype='int') self.M_ijl = np.zeros((self.num_wt,",
"<gh_stars>1-10 # -*- coding: utf-8 -*- import numpy as np class FlowField(object): def",
"v_ik = np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1), np.expand_dims(self.ws_binned, axis=0)), axis=-1) self.v_ikl_ideal = np.concatenate([v_ik for l_wd",
"and beta should be 1. Parameters ---------- R1: array:float Radius of the first",
"t in self.complete_layout[:, 3]], dtype='int') self.M_ijl = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) self.v_ikl_ideal = np.zeros(",
"wf_design.wt_types self.wind_shear_multi = self.wind_shear_log(self.complete_layout[:, 2], height_ref) self.num_wt = wf_design.num_wt self.num_ws_bin = len(ws_binned) self.num_wd_bin",
"np class FlowField(object): def __init__(self, wf_design, getAkf, height_ref, alpha=0.04, ws_binned=np.linspace(1, 30, 30), wd_binned=np.linspace(0,",
"v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real = self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_overlapping_area(self, R1,",
"self.R_list[index_up]) R = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=1) for i_wt in range(self.num_wt)], axis=1)",
"wd_binned[0] self.R_list = wf_design.D_list/2 self.Ar_list = wf_design.Ar_list self.type_list = np.array([int(t) for t in",
"330, 12), z0=0.01 ): self.complete_layout = wf_design.complete_layout self.getAkf = getAkf self.height_ref = height_ref",
":] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il dist_down = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin))",
"= (270 - self.wd_binned[l_wd])*np.pi/180.0 cos_rotate = np.cos(rotate_angle) sin_rotate = np.sin(rotate_angle) x_rotated = (self.complete_layout[:,",
"l_wd] = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake[index_down,",
"[m] Returns ------- A_ol: array:float Overlapping area [m^2] \"\"\" # treat all input",
"= np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake =",
"/ (1 + self.alpha*dist_down/R1)**4, 0) # calculate N_jk matrix v_jk = self.v_ikl_ideal[:, :,",
"1. Parameters ---------- R1: array:float Radius of the first circle [m] R2: array:float",
"calculate ideal wind speed v_ik = np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1), np.expand_dims(self.ws_binned, axis=0)), axis=-1) self.v_ikl_ideal",
"f_il ####################################################################### # 3. calculate real wind speed # calculate M_ijl matrix for",
"np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) wd_il = np.concatenate([np.expand_dims(self.wd_binned, axis=0) for",
"calculate pdf of local ideal wind speed x_il = np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1) for",
"range(self.num_wd_bin)], axis=-1) wd_il = np.concatenate([np.expand_dims(self.wd_binned, axis=0) for l_wt in range(self.num_wt)], axis=0) A_il, k_il,",
"2], height_ref) self.num_wt = wf_design.num_wt self.num_ws_bin = len(ws_binned) self.num_wd_bin = len(wd_binned) self.ws_bin_size =",
": [Ref] <NAME>, <NAME>, Solving the wind farm layout optimization problem using Random",
"# calculate N_jk matrix v_jk = self.v_ikl_ideal[:, :, 0] N_jk = np.zeros_like(v_jk) for",
"d = np.array(R1), np.array(R2), np.array(d), A_ol = np.zeros_like(R1) p = (R1 + R2",
"__init__(self, wf_design, getAkf, height_ref, alpha=0.04, ws_binned=np.linspace(1, 30, 30), wd_binned=np.linspace(0, 330, 12), z0=0.01 ):",
"wf_design.complete_layout self.getAkf = getAkf self.height_ref = height_ref self.alpha = alpha self.ws_binned = ws_binned",
"= ( x_rotated[index_down] - x_rotated[index_up]) dist_cross[index_down, index_up, l_wd] = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2",
"circles with radius R1 and R2, centers distanced d. The calculation formula can",
"in range(self.num_wt)], axis=0) A_il, k_il, f_il = self.getAkf(x_il, y_il, wd_il) for k_ws in",
"range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down[index_down, index_up, l_wd] = ( x_rotated[index_down]",
"(y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake[index_down, index_up, l_wd] =",
"d < (Rmin + Rmax)) alpha = np.arccos( (Rmax[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmin[index_partialwake]**2)",
"* (v / A) ** (k - 1) * np.exp(-(v / A) **",
"(v / A) ** (k - 1) * np.exp(-(v / A) ** k))",
"import numpy as np class FlowField(object): def __init__(self, wf_design, getAkf, height_ref, alpha=0.04, ws_binned=np.linspace(1,",
"* f_il dist_down = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) dist_cross = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) R_wake",
"= len(wd_binned) self.ws_bin_size = ws_binned[1] - ws_binned[0] self.wd_bin_size = wd_binned[1] - wd_binned[0] self.R_list",
"np.log(H/self.z0)/np.log(H_ref/self.z0) def change_layout(self, complete_layout_new): \"\"\" Assume only locations of turbines changed, and number,",
"'2' before alpha and beta should be 1. Parameters ---------- R1: array:float Radius",
"numpy as np class FlowField(object): def __init__(self, wf_design, getAkf, height_ref, alpha=0.04, ws_binned=np.linspace(1, 30,",
"np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) R_wake = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) A_ol = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin))",
"problem using Random search algorithm, Reneable Energy 78 (2015) 182-192 Note that however",
"area [m^2] \"\"\" # treat all input as array R1, R2, d =",
"np.pi*Rmin[index_fullwake]**2 # partial wake cases index_partialwake = np.logical_and(d > (Rmax -Rmin), d <",
"typos in Equation (A1), '2' before alpha and beta should be 1. Parameters",
"and number, hub-height and types of turbines remained the same.\"\"\" self.complete_layout = complete_layout_new",
"self.type_list == m_type N_jk[index_cal, :] = self.wt_types[m_type].get_Ct( v_jk[index_cal, :]) N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real",
"self.cal_overlapping_area(R_wake, self.R_list[index_down], dist_cross) self.M_ijl[index_down, index_up, l_wd] = ( (A_ol/self.Ar_list[index_down])**2 / (1 + self.alpha*dist_down/self.R_list[index_up])**4)",
"( (A_ol/self.Ar_list[index_down])**2 / (1 + self.alpha*dist_down/self.R_list[index_up])**4) # calculate N_jk matrix v_jk = self.v_ikl_ideal[:,",
"axis=-1) ###################################################################### # 2. calculate pdf of local ideal wind speed x_il =",
"- self.complete_layout[index_up, 2])**2) R_wake = self.alpha*dist_down + self.R_list[index_up] A_ol = self.cal_overlapping_area(R_wake, self.R_list[index_down], dist_cross)",
"are typos in Equation (A1), '2' before alpha and beta should be 1.",
"np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_overlapping_area(self, R1, R2, d): \"\"\" Calculate the",
"- self.complete_layout[index_up, 2])**2) R_wake[index_down, index_up, l_wd] = ( self.alpha*dist_down[index_down, index_up, l_wd] + self.R_list[index_up])",
"circle [m] R2: array:float Radius of the second circle [m] d: array:float Distance",
"(self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake = self.alpha*dist_down + self.R_list[index_up] A_ol = self.cal_overlapping_area(R_wake,",
"l_wd] = ( self.alpha*dist_down[index_down, index_up, l_wd] + self.R_list[index_up]) R = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt,",
"= np.arccos( (Rmax[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmin[index_partialwake]**2) /(2.0*Rmax[index_partialwake]*d[index_partialwake]) ) beta = np.arccos( (Rmin[index_partialwake]**2.0",
"2] - self.complete_layout[index_up, 2])**2) R_wake[index_down, index_up, l_wd] = ( self.alpha*dist_down[index_down, index_up, l_wd] +",
"np.where(dist_down>0, (A_ol/(np.pi*R**2))**2 / (1 + self.alpha*dist_down/R1)**4, 0) # calculate N_jk matrix v_jk =",
"d): \"\"\" Calculate the overlapping area of two circles with radius R1 and",
"alpha and beta should be 1. Parameters ---------- R1: array:float Radius of the",
"- wd_binned[0] self.R_list = wf_design.D_list/2 self.Ar_list = wf_design.Ar_list self.type_list = np.array([int(t) for t",
"dist_down = x_rotated[index_down] - x_rotated[index_up] dist_cross = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down,",
"- ws_binned[0] self.wd_bin_size = wd_binned[1] - wd_binned[0] self.R_list = wf_design.D_list/2 self.Ar_list = wf_design.Ar_list",
"np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1), np.expand_dims(self.ws_binned, axis=0)), axis=-1) self.v_ikl_ideal = np.concatenate([v_ik for l_wd in range(self.num_wd_bin)], axis=-1)",
"- y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake[index_down, index_up, l_wd] = (",
"= x_rotated[index_down] - x_rotated[index_up] dist_cross = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2]",
"self.num_wd_bin)) def wind_shear_log(self, H, H_ref): return np.log(H/self.z0)/np.log(H_ref/self.z0) def change_layout(self, complete_layout_new): \"\"\" Assume only",
"np.logical_and(d > (Rmax -Rmin), d < (Rmin + Rmax)) alpha = np.arccos( (Rmax[index_partialwake]**2.0",
"cal_overlapping_area(self, R1, R2, d): \"\"\" Calculate the overlapping area of two circles with",
"= np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) self.v_ikl_ideal = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.v_ikl_real = np.zeros(",
"for k_ws in range(self.num_ws_bin): self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) *",
"[m] R2: array:float Radius of the second circle [m] d: array:float Distance between",
"self.R_list[index_down], dist_cross) self.M_ijl[index_down, index_up, l_wd] = ( (A_ol/self.Ar_list[index_down])**2 / (1 + self.alpha*dist_down/self.R_list[index_up])**4) #",
"np.arccos( (Rmax[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmin[index_partialwake]**2) /(2.0*Rmax[index_partialwake]*d[index_partialwake]) ) beta = np.arccos( (Rmin[index_partialwake]**2.0 +",
"Assume only locations of turbines changed, and number, hub-height and types of turbines",
"182-192 Note that however there are typos in Equation (A1), '2' before alpha",
"Rmax = np.where(R1 < R2, R2, R1) Rmin = np.where(R1 < R2, R1,",
"+ d[index_partialwake]**2 - Rmax[index_partialwake]**2) /(2.0*Rmin[index_partialwake]*d[index_partialwake]) ) A_triangle = np.sqrt( p[index_partialwake]* (p[index_partialwake]-Rmin[index_partialwake])* (p[index_partialwake]-Rmax[index_partialwake])* (p[index_partialwake]-d[index_partialwake])",
"set(self.type_list): index_cal = self.type_list == m_type N_jk[index_cal, :] = self.wt_types[m_type].get_Ct( v_jk[index_cal, :]) N_jk",
"self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_flow_field_naive(self): ###################################################################### # 1. calculate",
"self.num_wt = wf_design.num_wt self.num_ws_bin = len(ws_binned) self.num_wd_bin = len(wd_binned) self.ws_bin_size = ws_binned[1] -",
"alpha=0.04, ws_binned=np.linspace(1, 30, 30), wd_binned=np.linspace(0, 330, 12), z0=0.01 ): self.complete_layout = wf_design.complete_layout self.getAkf",
"self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il ####################################################################### # 3.",
"Solving the wind farm layout optimization problem using Random search algorithm, Reneable Energy",
"1]*sin_rotate) y_rotated = (self.complete_layout[:, 1]*cos_rotate - self.complete_layout[:, 0]*sin_rotate) downwind_order = np.argsort(x_rotated) for i_up",
"that however there are typos in Equation (A1), '2' before alpha and beta",
"(y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake = self.alpha*dist_down +",
"axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) wd_il = np.concatenate([np.expand_dims(self.wd_binned, axis=0) for l_wt in",
"= (self.complete_layout[:, 0]*cos_rotate + self.complete_layout[:, 1]*sin_rotate) y_rotated = (self.complete_layout[:, 1]*cos_rotate - self.complete_layout[:, 0]*sin_rotate)",
"np.zeros_like(R1) p = (R1 + R2 + d)/2.0 # make sure R_big >=",
"index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down[index_down, index_up, l_wd] = ( x_rotated[index_down] -",
"for l_wt in range(self.num_wt)], axis=0) A_il, k_il, f_il = self.getAkf(x_il, y_il, wd_il) for",
"before alpha and beta should be 1. Parameters ---------- R1: array:float Radius of",
") A_triangle = np.sqrt( p[index_partialwake]* (p[index_partialwake]-Rmin[index_partialwake])* (p[index_partialwake]-Rmax[index_partialwake])* (p[index_partialwake]-d[index_partialwake]) ) A_ol[index_partialwake] = ( alpha*Rmax[index_partialwake]**2",
"self.M_ijl)) def cal_overlapping_area(self, R1, R2, d): \"\"\" Calculate the overlapping area of two",
"/(2.0*Rmin[index_partialwake]*d[index_partialwake]) ) A_triangle = np.sqrt( p[index_partialwake]* (p[index_partialwake]-Rmin[index_partialwake])* (p[index_partialwake]-Rmax[index_partialwake])* (p[index_partialwake]-d[index_partialwake]) ) A_ol[index_partialwake] = (",
"+ d)/2.0 # make sure R_big >= R_small Rmax = np.where(R1 < R2,",
"\"\"\" Calculate the overlapping area of two circles with radius R1 and R2,",
"in range(self.num_wd_bin)], axis=-1) wd_il = np.concatenate([np.expand_dims(self.wd_binned, axis=0) for l_wt in range(self.num_wt)], axis=0) A_il,",
"should be 1. Parameters ---------- R1: array:float Radius of the first circle [m]",
"the second circle [m] d: array:float Distance between two centers [m] Returns -------",
"cos_rotate = np.cos(rotate_angle) sin_rotate = np.sin(rotate_angle) x_rotated = (self.complete_layout[:, 0]*cos_rotate + self.complete_layout[:, 1]*sin_rotate)",
"self.num_wd_bin)) A_ol = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) ####################################################################### # 3. calculate real wind speed",
"p[index_partialwake]* (p[index_partialwake]-Rmin[index_partialwake])* (p[index_partialwake]-Rmax[index_partialwake])* (p[index_partialwake]-d[index_partialwake]) ) A_ol[index_partialwake] = ( alpha*Rmax[index_partialwake]**2 + beta*Rmin[index_partialwake]**2 - 2.0*A_triangle",
"= ws_binned[1] - ws_binned[0] self.wd_bin_size = wd_binned[1] - wd_binned[0] self.R_list = wf_design.D_list/2 self.Ar_list",
"R2, R2, R1) Rmin = np.where(R1 < R2, R1, R2) # full wake",
"getAkf self.height_ref = height_ref self.alpha = alpha self.ws_binned = ws_binned self.wd_binned = wd_binned",
"index_cal = self.type_list == m_type N_jk[index_cal, :] = self.wt_types[m_type].get_Ct( v_jk[index_cal, :]) N_jk =",
"+ R2 + d)/2.0 # make sure R_big >= R_small Rmax = np.where(R1",
"wd_binned self.z0=z0 self.wt_types = wf_design.wt_types self.wind_shear_multi = self.wind_shear_log(self.complete_layout[:, 2], height_ref) self.num_wt = wf_design.num_wt",
"number, hub-height and types of turbines remained the same.\"\"\" self.complete_layout = complete_layout_new def",
"= np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) ####################################################################### # 3. calculate real wind speed # calculate",
"partial wake cases index_partialwake = np.logical_and(d > (Rmax -Rmin), d < (Rmin +",
"R2: array:float Radius of the second circle [m] d: array:float Distance between two",
"matrix v_jk = self.v_ikl_ideal[:, :, 0] N_jk = np.zeros_like(v_jk) for m_type in set(self.type_list):",
"wd_il = np.concatenate([np.expand_dims(self.wd_binned, axis=0) for l_wt in range(self.num_wt)], axis=0) A_il, k_il, f_il =",
"locations of turbines changed, and number, hub-height and types of turbines remained the",
"cases index_partialwake = np.logical_and(d > (Rmax -Rmin), d < (Rmin + Rmax)) alpha",
"[Ref] <NAME>, <NAME>, Solving the wind farm layout optimization problem using Random search",
"self.wd_binned = wd_binned self.z0=z0 self.wt_types = wf_design.wt_types self.wind_shear_multi = self.wind_shear_log(self.complete_layout[:, 2], height_ref) self.num_wt",
") beta = np.arccos( (Rmin[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmax[index_partialwake]**2) /(2.0*Rmin[index_partialwake]*d[index_partialwake]) ) A_triangle =",
"-Rmin)) A_ol[index_fullwake] = np.pi*Rmin[index_fullwake]**2 # partial wake cases index_partialwake = np.logical_and(d > (Rmax",
"= downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down[index_down, index_up, l_wd] = ( x_rotated[index_down] - x_rotated[index_up])",
"Rmin[index_partialwake]**2) /(2.0*Rmax[index_partialwake]*d[index_partialwake]) ) beta = np.arccos( (Rmin[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmax[index_partialwake]**2) /(2.0*Rmin[index_partialwake]*d[index_partialwake]) )",
"radius R1 and R2, centers distanced d. The calculation formula can be found",
"ws_binned=np.linspace(1, 30, 30), wd_binned=np.linspace(0, 330, 12), z0=0.01 ): self.complete_layout = wf_design.complete_layout self.getAkf =",
"in self.complete_layout[:, 3]], dtype='int') self.M_ijl = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) self.v_ikl_ideal = np.zeros( (self.num_wt,",
"only locations of turbines changed, and number, hub-height and types of turbines remained",
"A_ol = self.cal_overlapping_area(R_wake, R, dist_cross) self.M_ijl = np.where(dist_down>0, (A_ol/(np.pi*R**2))**2 / (1 + self.alpha*dist_down/R1)**4,",
"wf_design, getAkf, height_ref, alpha=0.04, ws_binned=np.linspace(1, 30, 30), wd_binned=np.linspace(0, 330, 12), z0=0.01 ): self.complete_layout",
"full wake cases index_fullwake = (d<= (Rmax -Rmin)) A_ol[index_fullwake] = np.pi*Rmin[index_fullwake]**2 # partial",
"can be found in Eq. (A1) of : [Ref] <NAME>, <NAME>, Solving the",
"N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real = self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def",
"ws_binned self.wd_binned = wd_binned self.z0=z0 self.wt_types = wf_design.wt_types self.wind_shear_multi = self.wind_shear_log(self.complete_layout[:, 2], height_ref)",
"distanced d. The calculation formula can be found in Eq. (A1) of :",
"downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down[index_down, index_up, l_wd] = ( x_rotated[index_down] - x_rotated[index_up]) dist_cross[index_down,",
"self.complete_layout[index_up, 2])**2) R_wake = self.alpha*dist_down + self.R_list[index_up] A_ol = self.cal_overlapping_area(R_wake, self.R_list[index_down], dist_cross) self.M_ijl[index_down,",
"np.array(R1), np.array(R2), np.array(d), A_ol = np.zeros_like(R1) p = (R1 + R2 + d)/2.0",
"(Rmax -Rmin), d < (Rmin + Rmax)) alpha = np.arccos( (Rmax[index_partialwake]**2.0 + d[index_partialwake]**2",
"R2, d = np.array(R1), np.array(R2), np.array(d), A_ol = np.zeros_like(R1) p = (R1 +",
"self.wt_types = wf_design.wt_types self.wind_shear_multi = self.wind_shear_log(self.complete_layout[:, 2], height_ref) self.num_wt = wf_design.num_wt self.num_ws_bin =",
"= np.argsort(x_rotated) for i_up in range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down[index_down,",
"( self.alpha*dist_down[index_down, index_up, l_wd] + self.R_list[index_up]) R = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=1)",
"(p[index_partialwake]-Rmin[index_partialwake])* (p[index_partialwake]-Rmax[index_partialwake])* (p[index_partialwake]-d[index_partialwake]) ) A_ol[index_partialwake] = ( alpha*Rmax[index_partialwake]**2 + beta*Rmin[index_partialwake]**2 - 2.0*A_triangle )",
"cal_flow_field(self): ###################################################################### # 1. calculate ideal wind speed v_ik = np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1),",
"wind speed x_il = np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) y_il",
"second circle [m] d: array:float Distance between two centers [m] Returns ------- A_ol:",
"R, dist_cross) self.M_ijl = np.where(dist_down>0, (A_ol/(np.pi*R**2))**2 / (1 + self.alpha*dist_down/R1)**4, 0) # calculate",
"real wind speed # calculate M_ijl matrix for l_wd in range(self.num_wd_bin): rotate_angle =",
"search algorithm, Reneable Energy 78 (2015) 182-192 Note that however there are typos",
"self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il dist_down = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) dist_cross =",
"(Rmax[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmin[index_partialwake]**2) /(2.0*Rmax[index_partialwake]*d[index_partialwake]) ) beta = np.arccos( (Rmin[index_partialwake]**2.0 + d[index_partialwake]**2",
"in set(self.type_list): index_cal = self.type_list == m_type N_jk[index_cal, :] = self.wt_types[m_type].get_Ct( v_jk[index_cal, :])",
"= self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il ####################################################################### # 3. calculate real wind",
"(Rmin[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmax[index_partialwake]**2) /(2.0*Rmin[index_partialwake]*d[index_partialwake]) ) A_triangle = np.sqrt( p[index_partialwake]* (p[index_partialwake]-Rmin[index_partialwake])* (p[index_partialwake]-Rmax[index_partialwake])*",
"k): return ((k / A) * (v / A) ** (k - 1)",
"+ self.alpha*dist_down/self.R_list[index_up])**4) # calculate N_jk matrix v_jk = self.v_ikl_ideal[:, :, 0] N_jk =",
"self.num_wt, self.num_wd_bin)) dist_cross = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) R_wake = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) A_ol",
"x_rotated[index_up]) dist_cross[index_down, index_up, l_wd] = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] -",
"d. The calculation formula can be found in Eq. (A1) of : [Ref]",
"/(2.0*Rmax[index_partialwake]*d[index_partialwake]) ) beta = np.arccos( (Rmin[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmax[index_partialwake]**2) /(2.0*Rmin[index_partialwake]*d[index_partialwake]) ) A_triangle",
":] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il ####################################################################### # 3. calculate real",
"np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) dist_cross = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) R_wake = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin))",
"= np.arccos( (Rmin[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmax[index_partialwake]**2) /(2.0*Rmin[index_partialwake]*d[index_partialwake]) ) A_triangle = np.sqrt( p[index_partialwake]*",
"self.wt_types[m_type].get_Ct( v_jk[index_cal, :]) N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real = self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl',",
"self.wind_shear_log(self.complete_layout[:, 2], height_ref) self.num_wt = wf_design.num_wt self.num_ws_bin = len(ws_binned) self.num_wd_bin = len(wd_binned) self.ws_bin_size",
"of turbines changed, and number, hub-height and types of turbines remained the same.\"\"\"",
"in range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down[index_down, index_up, l_wd] = (",
"the overlapping area of two circles with radius R1 and R2, centers distanced",
"FlowField(object): def __init__(self, wf_design, getAkf, height_ref, alpha=0.04, ws_binned=np.linspace(1, 30, 30), wd_binned=np.linspace(0, 330, 12),",
"self.alpha = alpha self.ws_binned = ws_binned self.wd_binned = wd_binned self.z0=z0 self.wt_types = wf_design.wt_types",
"+ self.R_list[index_up] A_ol = self.cal_overlapping_area(R_wake, self.R_list[index_down], dist_cross) self.M_ijl[index_down, index_up, l_wd] = ( (A_ol/self.Ar_list[index_down])**2",
"< (Rmin + Rmax)) alpha = np.arccos( (Rmax[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmin[index_partialwake]**2) /(2.0*Rmax[index_partialwake]*d[index_partialwake])",
"A_ol = self.cal_overlapping_area(R_wake, self.R_list[index_down], dist_cross) self.M_ijl[index_down, index_up, l_wd] = ( (A_ol/self.Ar_list[index_down])**2 / (1",
"np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_overlapping_area(self, R1, R2, d): \"\"\" Calculate the overlapping",
"self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il dist_down = np.zeros((self.num_wt,",
"class FlowField(object): def __init__(self, wf_design, getAkf, height_ref, alpha=0.04, ws_binned=np.linspace(1, 30, 30), wd_binned=np.linspace(0, 330,",
"-*- import numpy as np class FlowField(object): def __init__(self, wf_design, getAkf, height_ref, alpha=0.04,",
"= ws_binned self.wd_binned = wd_binned self.z0=z0 self.wt_types = wf_design.wt_types self.wind_shear_multi = self.wind_shear_log(self.complete_layout[:, 2],",
"R_wake = self.alpha*dist_down + self.R_list[index_up] A_ol = self.cal_overlapping_area(R_wake, self.R_list[index_down], dist_cross) self.M_ijl[index_down, index_up, l_wd]",
"A_ol[index_partialwake] = ( alpha*Rmax[index_partialwake]**2 + beta*Rmin[index_partialwake]**2 - 2.0*A_triangle ) return A_ol def cal_pdf_Weibull(self,",
"def cal_flow_field(self): ###################################################################### # 1. calculate ideal wind speed v_ik = np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi,",
"l_wd in range(self.num_wd_bin)], axis=-1) y_il = np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1) for l_wd in range(self.num_wd_bin)],",
"for t in self.complete_layout[:, 3]], dtype='int') self.M_ijl = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) self.v_ikl_ideal =",
"ideal wind speed v_ik = np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1), np.expand_dims(self.ws_binned, axis=0)), axis=-1) self.v_ikl_ideal =",
"= ( (A_ol/self.Ar_list[index_down])**2 / (1 + self.alpha*dist_down/self.R_list[index_up])**4) # calculate N_jk matrix v_jk =",
"<NAME>, <NAME>, Solving the wind farm layout optimization problem using Random search algorithm,",
"= (R1 + R2 + d)/2.0 # make sure R_big >= R_small Rmax",
"dist_cross) self.M_ijl = np.where(dist_down>0, (A_ol/(np.pi*R**2))**2 / (1 + self.alpha*dist_down/R1)**4, 0) # calculate N_jk",
"y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake = self.alpha*dist_down + self.R_list[index_up] A_ol",
"wake cases index_partialwake = np.logical_and(d > (Rmax -Rmin), d < (Rmin + Rmax))",
"for l_wd in range(self.num_wd_bin)], axis=-1) ###################################################################### # 2. calculate pdf of local ideal",
"+ beta*Rmin[index_partialwake]**2 - 2.0*A_triangle ) return A_ol def cal_pdf_Weibull(self, v, A, k): return",
"A_il, k_il) * f_il ####################################################################### # 3. calculate real wind speed # calculate",
"d[index_partialwake]**2 - Rmin[index_partialwake]**2) /(2.0*Rmax[index_partialwake]*d[index_partialwake]) ) beta = np.arccos( (Rmin[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmax[index_partialwake]**2)",
"np.where(R1 < R2, R1, R2) # full wake cases index_fullwake = (d<= (Rmax",
"i_wt in range(self.num_wt)], axis=1) R1 = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=0) for i_wt",
"self.M_ijl = np.where(dist_down>0, (A_ol/(np.pi*R**2))**2 / (1 + self.alpha*dist_down/R1)**4, 0) # calculate N_jk matrix",
"wind speed # calculate M_ijl matrix for l_wd in range(self.num_wd_bin): rotate_angle = (270",
"range(self.num_ws_bin): self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il dist_down =",
"self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_overlapping_area(self, R1, R2, d): \"\"\"",
"\"\"\" # treat all input as array R1, R2, d = np.array(R1), np.array(R2),",
":, 0] N_jk = np.zeros_like(v_jk) for m_type in set(self.type_list): index_cal = self.type_list ==",
"(Rmax -Rmin)) A_ol[index_fullwake] = np.pi*Rmin[index_fullwake]**2 # partial wake cases index_partialwake = np.logical_and(d >",
"centers distanced d. The calculation formula can be found in Eq. (A1) of",
"# partial wake cases index_partialwake = np.logical_and(d > (Rmax -Rmin), d < (Rmin",
"# make sure R_big >= R_small Rmax = np.where(R1 < R2, R2, R1)",
"cal_pdf_Weibull(self, v, A, k): return ((k / A) * (v / A) **",
"dist_cross = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) R_wake = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) A_ol = np.zeros((self.num_wt,",
"= (self.complete_layout[:, 1]*cos_rotate - self.complete_layout[:, 0]*sin_rotate) downwind_order = np.argsort(x_rotated) for i_up in range(self.num_wt-1):",
"self.getAkf = getAkf self.height_ref = height_ref self.alpha = alpha self.ws_binned = ws_binned self.wd_binned",
"the wind farm layout optimization problem using Random search algorithm, Reneable Energy 78",
"pdf of local ideal wind speed x_il = np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1) for l_wd",
"height_ref, alpha=0.04, ws_binned=np.linspace(1, 30, 30), wd_binned=np.linspace(0, 330, 12), z0=0.01 ): self.complete_layout = wf_design.complete_layout",
"(2015) 182-192 Note that however there are typos in Equation (A1), '2' before",
"# treat all input as array R1, R2, d = np.array(R1), np.array(R2), np.array(d),",
"---------- R1: array:float Radius of the first circle [m] R2: array:float Radius of",
"formula can be found in Eq. (A1) of : [Ref] <NAME>, <NAME>, Solving",
"= downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down = x_rotated[index_down] - x_rotated[index_up] dist_cross = np.sqrt(",
"return ((k / A) * (v / A) ** (k - 1) *",
"Energy 78 (2015) 182-192 Note that however there are typos in Equation (A1),",
"self.num_wd_bin)) ####################################################################### # 3. calculate real wind speed # calculate M_ijl matrix for",
"self.M_ijl = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) self.v_ikl_ideal = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.v_ikl_real =",
"k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il dist_down = np.zeros((self.num_wt, self.num_wt,",
"<NAME>, Solving the wind farm layout optimization problem using Random search algorithm, Reneable",
"A) * (v / A) ** (k - 1) * np.exp(-(v / A)",
"+ Rmax)) alpha = np.arccos( (Rmax[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmin[index_partialwake]**2) /(2.0*Rmax[index_partialwake]*d[index_partialwake]) ) beta",
"in range(self.num_wt)], axis=1) R1 = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=0) for i_wt in",
"however there are typos in Equation (A1), '2' before alpha and beta should",
"len(ws_binned) self.num_wd_bin = len(wd_binned) self.ws_bin_size = ws_binned[1] - ws_binned[0] self.wd_bin_size = wd_binned[1] -",
"self.num_wd_bin)) R_wake = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) A_ol = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) ####################################################################### #",
"###################################################################### # 2. calculate pdf of local ideal wind speed x_il = np.concatenate([np.expand_dims(self.complete_layout[:,",
"np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake[index_down, index_up, l_wd]",
"R1 = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=0) for i_wt in range(self.num_wt)], axis=0) A_ol",
"Radius of the first circle [m] R2: array:float Radius of the second circle",
"dist_cross = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake",
"range(self.num_wd_bin)], axis=-1) ###################################################################### # 2. calculate pdf of local ideal wind speed x_il",
"2.0*A_triangle ) return A_ol def cal_pdf_Weibull(self, v, A, k): return ((k / A)",
"- self.complete_layout[:, 0]*sin_rotate) downwind_order = np.argsort(x_rotated) for i_up in range(self.num_wt-1): index_up = downwind_order[i_up]",
"np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) def wind_shear_log(self, H, H_ref): return np.log(H/self.z0)/np.log(H_ref/self.z0) def change_layout(self, complete_layout_new):",
") return A_ol def cal_pdf_Weibull(self, v, A, k): return ((k / A) *",
"l_wd in range(self.num_wd_bin): rotate_angle = (270 - self.wd_binned[l_wd])*np.pi/180.0 cos_rotate = np.cos(rotate_angle) sin_rotate =",
"(1 + self.alpha*dist_down/R1)**4, 0) # calculate N_jk matrix v_jk = self.v_ikl_ideal[:, :, 0]",
"self.num_wt, self.num_wd_bin)) self.v_ikl_ideal = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.v_ikl_real = np.zeros( (self.num_wt, self.num_ws_bin,",
"self.alpha*dist_down/R1)**4, 0) # calculate N_jk matrix v_jk = self.v_ikl_ideal[:, :, 0] N_jk =",
"= wf_design.wt_types self.wind_shear_multi = self.wind_shear_log(self.complete_layout[:, 2], height_ref) self.num_wt = wf_design.num_wt self.num_ws_bin = len(ws_binned)",
"def cal_pdf_Weibull(self, v, A, k): return ((k / A) * (v / A)",
"np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) A_ol = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) ####################################################################### # 3. calculate real",
"ijl->ikl', N_jk, self.M_ijl)) def cal_overlapping_area(self, R1, R2, d): \"\"\" Calculate the overlapping area",
"np.array([int(t) for t in self.complete_layout[:, 3]], dtype='int') self.M_ijl = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) self.v_ikl_ideal",
"R2, R1, R2) # full wake cases index_fullwake = (d<= (Rmax -Rmin)) A_ol[index_fullwake]",
"= wd_binned[1] - wd_binned[0] self.R_list = wf_design.D_list/2 self.Ar_list = wf_design.Ar_list self.type_list = np.array([int(t)",
"wind_shear_log(self, H, H_ref): return np.log(H/self.z0)/np.log(H_ref/self.z0) def change_layout(self, complete_layout_new): \"\"\" Assume only locations of",
"for l_wd in range(self.num_wd_bin)], axis=-1) wd_il = np.concatenate([np.expand_dims(self.wd_binned, axis=0) for l_wt in range(self.num_wt)],",
"(270 - self.wd_binned[l_wd])*np.pi/180.0 cos_rotate = np.cos(rotate_angle) sin_rotate = np.sin(rotate_angle) x_rotated = (self.complete_layout[:, 0]*cos_rotate",
"[m] d: array:float Distance between two centers [m] Returns ------- A_ol: array:float Overlapping",
"return np.log(H/self.z0)/np.log(H_ref/self.z0) def change_layout(self, complete_layout_new): \"\"\" Assume only locations of turbines changed, and",
"hub-height and types of turbines remained the same.\"\"\" self.complete_layout = complete_layout_new def cal_flow_field(self):",
"- y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake = self.alpha*dist_down + self.R_list[index_up]",
"= self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_overlapping_area(self, R1, R2, d):",
"np.zeros_like(v_jk) for m_type in set(self.type_list): index_cal = self.type_list == m_type N_jk[index_cal, :] =",
"= wf_design.complete_layout self.getAkf = getAkf self.height_ref = height_ref self.alpha = alpha self.ws_binned =",
"array:float Radius of the first circle [m] R2: array:float Radius of the second",
"(self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.pdf_ikl = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) def wind_shear_log(self, H, H_ref):",
"# 2. calculate pdf of local ideal wind speed x_il = np.concatenate([np.expand_dims(self.complete_layout[:, 0],",
"v_jk = self.v_ikl_ideal[:, :, 0] N_jk = np.zeros_like(v_jk) for m_type in set(self.type_list): index_cal",
"Eq. (A1) of : [Ref] <NAME>, <NAME>, Solving the wind farm layout optimization",
"y_il, wd_il) for k_ws in range(self.num_ws_bin): self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il,",
"farm layout optimization problem using Random search algorithm, Reneable Energy 78 (2015) 182-192",
"+ (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake = self.alpha*dist_down + self.R_list[index_up] A_ol =",
"in Equation (A1), '2' before alpha and beta should be 1. Parameters ----------",
"self.ws_binned = ws_binned self.wd_binned = wd_binned self.z0=z0 self.wt_types = wf_design.wt_types self.wind_shear_multi = self.wind_shear_log(self.complete_layout[:,",
"axis=-1) wd_il = np.concatenate([np.expand_dims(self.wd_binned, axis=0) for l_wt in range(self.num_wt)], axis=0) A_il, k_il, f_il",
"R2, d): \"\"\" Calculate the overlapping area of two circles with radius R1",
"\"\"\" Assume only locations of turbines changed, and number, hub-height and types of",
"cases index_fullwake = (d<= (Rmax -Rmin)) A_ol[index_fullwake] = np.pi*Rmin[index_fullwake]**2 # partial wake cases",
"len(wd_binned) self.ws_bin_size = ws_binned[1] - ws_binned[0] self.wd_bin_size = wd_binned[1] - wd_binned[0] self.R_list =",
"): self.complete_layout = wf_design.complete_layout self.getAkf = getAkf self.height_ref = height_ref self.alpha = alpha",
"(A_ol/(np.pi*R**2))**2 / (1 + self.alpha*dist_down/R1)**4, 0) # calculate N_jk matrix v_jk = self.v_ikl_ideal[:,",
"of local ideal wind speed x_il = np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1) for l_wd in",
"= wf_design.D_list/2 self.Ar_list = wf_design.Ar_list self.type_list = np.array([int(t) for t in self.complete_layout[:, 3]],",
"same.\"\"\" self.complete_layout = complete_layout_new def cal_flow_field(self): ###################################################################### # 1. calculate ideal wind speed",
"+ (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake[index_down, index_up, l_wd] = ( self.alpha*dist_down[index_down, index_up,",
"in range(self.num_wt)], axis=0) A_ol = self.cal_overlapping_area(R_wake, R, dist_cross) self.M_ijl = np.where(dist_down>0, (A_ol/(np.pi*R**2))**2 /",
"The calculation formula can be found in Eq. (A1) of : [Ref] <NAME>,",
"A_ol: array:float Overlapping area [m^2] \"\"\" # treat all input as array R1,",
"np.argsort(x_rotated) for i_up in range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down[index_down, index_up,",
"= wd_binned self.z0=z0 self.wt_types = wf_design.wt_types self.wind_shear_multi = self.wind_shear_log(self.complete_layout[:, 2], height_ref) self.num_wt =",
"= np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) R_wake = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) A_ol = np.zeros((self.num_wt, self.num_wt,",
"= np.where(dist_down>0, (A_ol/(np.pi*R**2))**2 / (1 + self.alpha*dist_down/R1)**4, 0) # calculate N_jk matrix v_jk",
"A_ol def cal_pdf_Weibull(self, v, A, k): return ((k / A) * (v /",
"self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il ####################################################################### # 3. calculate real wind speed",
"of : [Ref] <NAME>, <NAME>, Solving the wind farm layout optimization problem using",
"array R1, R2, d = np.array(R1), np.array(R2), np.array(d), A_ol = np.zeros_like(R1) p =",
"np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=1) for i_wt in range(self.num_wt)], axis=1) R1 = np.concatenate([",
"def cal_overlapping_area(self, R1, R2, d): \"\"\" Calculate the overlapping area of two circles",
"getAkf, height_ref, alpha=0.04, ws_binned=np.linspace(1, 30, 30), wd_binned=np.linspace(0, 330, 12), z0=0.01 ): self.complete_layout =",
"f_il = self.getAkf(x_il, y_il, wd_il) for k_ws in range(self.num_ws_bin): self.pdf_ikl[:, k_ws, :] =",
"for i_wt in range(self.num_wt)], axis=1) R1 = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=0) for",
"downwind_order = np.argsort(x_rotated) for i_up in range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:]",
"types of turbines remained the same.\"\"\" self.complete_layout = complete_layout_new def cal_flow_field(self): ###################################################################### #",
"downwind_order[i_up+1:] dist_down[index_down, index_up, l_wd] = ( x_rotated[index_down] - x_rotated[index_up]) dist_cross[index_down, index_up, l_wd] =",
"= downwind_order[i_up+1:] dist_down[index_down, index_up, l_wd] = ( x_rotated[index_down] - x_rotated[index_up]) dist_cross[index_down, index_up, l_wd]",
"self.num_wd_bin)), axis=1) for i_wt in range(self.num_wt)], axis=1) R1 = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)),",
"np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_flow_field_naive(self): ###################################################################### # 1. calculate ideal wind speed",
"####################################################################### # 3. calculate real wind speed # calculate M_ijl matrix for l_wd",
"dist_down = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) dist_cross = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) R_wake = np.zeros((self.num_wt,",
"- x_rotated[index_up] dist_cross = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up,",
"A_ol = np.zeros_like(R1) p = (R1 + R2 + d)/2.0 # make sure",
"index_partialwake = np.logical_and(d > (Rmax -Rmin), d < (Rmin + Rmax)) alpha =",
"be 1. Parameters ---------- R1: array:float Radius of the first circle [m] R2:",
"###################################################################### # 1. calculate ideal wind speed v_ik = np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1), np.expand_dims(self.ws_binned,",
"Random search algorithm, Reneable Energy 78 (2015) 182-192 Note that however there are",
"self.alpha*dist_down/self.R_list[index_up])**4) # calculate N_jk matrix v_jk = self.v_ikl_ideal[:, :, 0] N_jk = np.zeros_like(v_jk)",
"x_rotated[index_down] - x_rotated[index_up]) dist_cross[index_down, index_up, l_wd] = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down,",
"ideal wind speed x_il = np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1)",
"axis=1) R1 = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=0) for i_wt in range(self.num_wt)], axis=0)",
"= downwind_order[i_up+1:] dist_down = x_rotated[index_down] - x_rotated[index_up] dist_cross = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2",
"array:float Overlapping area [m^2] \"\"\" # treat all input as array R1, R2,",
"self.complete_layout = wf_design.complete_layout self.getAkf = getAkf self.height_ref = height_ref self.alpha = alpha self.ws_binned",
"= self.v_ikl_ideal[:, :, 0] N_jk = np.zeros_like(v_jk) for m_type in set(self.type_list): index_cal =",
"np.concatenate([np.expand_dims(self.wd_binned, axis=0) for l_wt in range(self.num_wt)], axis=0) A_il, k_il, f_il = self.getAkf(x_il, y_il,",
"there are typos in Equation (A1), '2' before alpha and beta should be",
"self.num_wt, self.num_wd_bin)) A_ol = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) ####################################################################### # 3. calculate real wind",
"two centers [m] Returns ------- A_ol: array:float Overlapping area [m^2] \"\"\" # treat",
"= np.sqrt( p[index_partialwake]* (p[index_partialwake]-Rmin[index_partialwake])* (p[index_partialwake]-Rmax[index_partialwake])* (p[index_partialwake]-d[index_partialwake]) ) A_ol[index_partialwake] = ( alpha*Rmax[index_partialwake]**2 + beta*Rmin[index_partialwake]**2",
"- x_rotated[index_up]) dist_cross[index_down, index_up, l_wd] = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2]",
"np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) y_il = np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1)",
"m_type in set(self.type_list): index_cal = self.type_list == m_type N_jk[index_cal, :] = self.wt_types[m_type].get_Ct( v_jk[index_cal,",
"the same.\"\"\" self.complete_layout = complete_layout_new def cal_flow_field(self): ###################################################################### # 1. calculate ideal wind",
"# calculate M_ijl matrix for l_wd in range(self.num_wd_bin): rotate_angle = (270 - self.wd_binned[l_wd])*np.pi/180.0",
"y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake[index_down, index_up, l_wd] = ( self.alpha*dist_down[index_down,",
"of the first circle [m] R2: array:float Radius of the second circle [m]",
"all input as array R1, R2, d = np.array(R1), np.array(R2), np.array(d), A_ol =",
"= self.wt_types[m_type].get_Ct( v_jk[index_cal, :]) N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real = self.v_ikl_ideal - np.sqrt( np.einsum('jk,",
"calculate N_jk matrix v_jk = self.v_ikl_ideal[:, :, 0] N_jk = np.zeros_like(v_jk) for m_type",
"self.R_list = wf_design.D_list/2 self.Ar_list = wf_design.Ar_list self.type_list = np.array([int(t) for t in self.complete_layout[:,",
"in Eq. (A1) of : [Ref] <NAME>, <NAME>, Solving the wind farm layout",
"= np.where(R1 < R2, R1, R2) # full wake cases index_fullwake = (d<=",
"np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1), np.expand_dims(self.ws_binned, axis=0)), axis=-1) self.v_ikl_ideal = np.concatenate([v_ik for l_wd in range(self.num_wd_bin)],",
"((k / A) * (v / A) ** (k - 1) * np.exp(-(v",
"np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.pdf_ikl = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) def wind_shear_log(self, H,",
"- Rmin[index_partialwake]**2) /(2.0*Rmax[index_partialwake]*d[index_partialwake]) ) beta = np.arccos( (Rmin[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmax[index_partialwake]**2) /(2.0*Rmin[index_partialwake]*d[index_partialwake])",
"0] N_jk = np.zeros_like(v_jk) for m_type in set(self.type_list): index_cal = self.type_list == m_type",
"= self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_flow_field_naive(self): ###################################################################### # 1.",
"self.num_wd_bin)) dist_cross = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) R_wake = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) A_ol =",
"2. calculate pdf of local ideal wind speed x_il = np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1)",
"= np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake[index_down, index_up,",
"self.num_ws_bin, self.num_wd_bin)) self.pdf_ikl = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) def wind_shear_log(self, H, H_ref): return",
"+ d[index_partialwake]**2 - Rmin[index_partialwake]**2) /(2.0*Rmax[index_partialwake]*d[index_partialwake]) ) beta = np.arccos( (Rmin[index_partialwake]**2.0 + d[index_partialwake]**2 -",
"in range(self.num_ws_bin): self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il dist_down",
"for i_wt in range(self.num_wt)], axis=0) A_ol = self.cal_overlapping_area(R_wake, R, dist_cross) self.M_ijl = np.where(dist_down>0,",
"= self.getAkf(x_il, y_il, wd_il) for k_ws in range(self.num_ws_bin): self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull(",
"/ (1 + self.alpha*dist_down/self.R_list[index_up])**4) # calculate N_jk matrix v_jk = self.v_ikl_ideal[:, :, 0]",
"= np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) y_il = np.concatenate([np.expand_dims(self.complete_layout[:, 1],",
"(A_ol/self.Ar_list[index_down])**2 / (1 + self.alpha*dist_down/self.R_list[index_up])**4) # calculate N_jk matrix v_jk = self.v_ikl_ideal[:, :,",
"height_ref) self.num_wt = wf_design.num_wt self.num_ws_bin = len(ws_binned) self.num_wd_bin = len(wd_binned) self.ws_bin_size = ws_binned[1]",
"array:float Radius of the second circle [m] d: array:float Distance between two centers",
"= v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real = self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_flow_field_naive(self):",
"= np.concatenate([np.expand_dims(self.wd_binned, axis=0) for l_wt in range(self.num_wt)], axis=0) A_il, k_il, f_il = self.getAkf(x_il,",
"index_up, l_wd] = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2)",
"sure R_big >= R_small Rmax = np.where(R1 < R2, R2, R1) Rmin =",
"axis=1) for i_wt in range(self.num_wt)], axis=1) R1 = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=0)",
"R1, R2) # full wake cases index_fullwake = (d<= (Rmax -Rmin)) A_ol[index_fullwake] =",
"1]*cos_rotate - self.complete_layout[:, 0]*sin_rotate) downwind_order = np.argsort(x_rotated) for i_up in range(self.num_wt-1): index_up =",
"range(self.num_wt)], axis=1) R1 = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=0) for i_wt in range(self.num_wt)],",
"np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_flow_field_naive(self): ###################################################################### # 1. calculate ideal wind",
"self.num_wt, self.num_wd_bin)) ####################################################################### # 3. calculate real wind speed # calculate M_ijl matrix",
"= np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=1) for i_wt in range(self.num_wt)], axis=1) R1 =",
"axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) y_il = np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1) for l_wd",
"in range(self.num_wd_bin): rotate_angle = (270 - self.wd_binned[l_wd])*np.pi/180.0 cos_rotate = np.cos(rotate_angle) sin_rotate = np.sin(rotate_angle)",
"self.complete_layout[:, 0]*sin_rotate) downwind_order = np.argsort(x_rotated) for i_up in range(self.num_wt-1): index_up = downwind_order[i_up] index_down",
"(self.num_wt, self.num_ws_bin, self.num_wd_bin)) def wind_shear_log(self, H, H_ref): return np.log(H/self.z0)/np.log(H_ref/self.z0) def change_layout(self, complete_layout_new): \"\"\"",
"R1, R2, d = np.array(R1), np.array(R2), np.array(d), A_ol = np.zeros_like(R1) p = (R1",
"R_small Rmax = np.where(R1 < R2, R2, R1) Rmin = np.where(R1 < R2,",
"downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down = x_rotated[index_down] - x_rotated[index_up] dist_cross = np.sqrt( (y_rotated[index_down]",
"def cal_flow_field_naive(self): ###################################################################### # 1. calculate ideal wind speed v_ik = np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi,",
"self.ws_bin_size = ws_binned[1] - ws_binned[0] self.wd_bin_size = wd_binned[1] - wd_binned[0] self.R_list = wf_design.D_list/2",
"np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake = self.alpha*dist_down",
"A_ol[index_fullwake] = np.pi*Rmin[index_fullwake]**2 # partial wake cases index_partialwake = np.logical_and(d > (Rmax -Rmin),",
"using Random search algorithm, Reneable Energy 78 (2015) 182-192 Note that however there",
"= alpha self.ws_binned = ws_binned self.wd_binned = wd_binned self.z0=z0 self.wt_types = wf_design.wt_types self.wind_shear_multi",
"self.num_ws_bin, self.num_wd_bin)) def wind_shear_log(self, H, H_ref): return np.log(H/self.z0)/np.log(H_ref/self.z0) def change_layout(self, complete_layout_new): \"\"\" Assume",
"Parameters ---------- R1: array:float Radius of the first circle [m] R2: array:float Radius",
"self.z0=z0 self.wt_types = wf_design.wt_types self.wind_shear_multi = self.wind_shear_log(self.complete_layout[:, 2], height_ref) self.num_wt = wf_design.num_wt self.num_ws_bin",
"and types of turbines remained the same.\"\"\" self.complete_layout = complete_layout_new def cal_flow_field(self): ######################################################################",
"N_jk[index_cal, :] = self.wt_types[m_type].get_Ct( v_jk[index_cal, :]) N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real = self.v_ikl_ideal -",
"z0=0.01 ): self.complete_layout = wf_design.complete_layout self.getAkf = getAkf self.height_ref = height_ref self.alpha =",
"0], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) y_il = np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1) for",
"R = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=1) for i_wt in range(self.num_wt)], axis=1) R1",
"range(self.num_wd_bin)], axis=-1) y_il = np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) wd_il",
"self.wd_binned[l_wd])*np.pi/180.0 cos_rotate = np.cos(rotate_angle) sin_rotate = np.sin(rotate_angle) x_rotated = (self.complete_layout[:, 0]*cos_rotate + self.complete_layout[:,",
"R2) # full wake cases index_fullwake = (d<= (Rmax -Rmin)) A_ol[index_fullwake] = np.pi*Rmin[index_fullwake]**2",
"k_il) * f_il dist_down = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) dist_cross = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin))",
"change_layout(self, complete_layout_new): \"\"\" Assume only locations of turbines changed, and number, hub-height and",
"- self.wd_binned[l_wd])*np.pi/180.0 cos_rotate = np.cos(rotate_angle) sin_rotate = np.sin(rotate_angle) x_rotated = (self.complete_layout[:, 0]*cos_rotate +",
"= wf_design.Ar_list self.type_list = np.array([int(t) for t in self.complete_layout[:, 3]], dtype='int') self.M_ijl =",
"1. calculate ideal wind speed v_ik = np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1), np.expand_dims(self.ws_binned, axis=0)), axis=-1)",
"= np.argsort(x_rotated) for i_up in range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down",
"v_jk[index_cal, :]) N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real = self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk,",
"= getAkf self.height_ref = height_ref self.alpha = alpha self.ws_binned = ws_binned self.wd_binned =",
"x_rotated[index_down] - x_rotated[index_up] dist_cross = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] -",
"l_wd in range(self.num_wd_bin)], axis=-1) ###################################################################### # 2. calculate pdf of local ideal wind",
"self.num_wd_bin = len(wd_binned) self.ws_bin_size = ws_binned[1] - ws_binned[0] self.wd_bin_size = wd_binned[1] - wd_binned[0]",
"d[index_partialwake]**2 - Rmax[index_partialwake]**2) /(2.0*Rmin[index_partialwake]*d[index_partialwake]) ) A_triangle = np.sqrt( p[index_partialwake]* (p[index_partialwake]-Rmin[index_partialwake])* (p[index_partialwake]-Rmax[index_partialwake])* (p[index_partialwake]-d[index_partialwake]) )",
"[m^2] \"\"\" # treat all input as array R1, R2, d = np.array(R1),",
"wd_binned[1] - wd_binned[0] self.R_list = wf_design.D_list/2 self.Ar_list = wf_design.Ar_list self.type_list = np.array([int(t) for",
"index_up, l_wd] = ( (A_ol/self.Ar_list[index_down])**2 / (1 + self.alpha*dist_down/self.R_list[index_up])**4) # calculate N_jk matrix",
"(self.num_wt, self.num_wd_bin)), axis=0) for i_wt in range(self.num_wt)], axis=0) A_ol = self.cal_overlapping_area(R_wake, R, dist_cross)",
"axis=-1) y_il = np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) wd_il =",
"0]*sin_rotate) downwind_order = np.argsort(x_rotated) for i_up in range(self.num_wt-1): index_up = downwind_order[i_up] index_down =",
":] = self.wt_types[m_type].get_Ct( v_jk[index_cal, :]) N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real = self.v_ikl_ideal - np.sqrt(",
"= self.alpha*dist_down + self.R_list[index_up] A_ol = self.cal_overlapping_area(R_wake, self.R_list[index_down], dist_cross) self.M_ijl[index_down, index_up, l_wd] =",
"as array R1, R2, d = np.array(R1), np.array(R2), np.array(d), A_ol = np.zeros_like(R1) p",
"= self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il dist_down = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) dist_cross",
"remained the same.\"\"\" self.complete_layout = complete_layout_new def cal_flow_field(self): ###################################################################### # 1. calculate ideal",
"self.alpha*dist_down[index_down, index_up, l_wd] + self.R_list[index_up]) R = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=1) for",
"in range(self.num_wd_bin)], axis=-1) ###################################################################### # 2. calculate pdf of local ideal wind speed",
"* f_il ####################################################################### # 3. calculate real wind speed # calculate M_ijl matrix",
"Calculate the overlapping area of two circles with radius R1 and R2, centers",
"algorithm, Reneable Energy 78 (2015) 182-192 Note that however there are typos in",
"axis=0)), axis=-1) self.v_ikl_ideal = np.concatenate([v_ik for l_wd in range(self.num_wd_bin)], axis=-1) ###################################################################### # 2.",
"the first circle [m] R2: array:float Radius of the second circle [m] d:",
"array:float Distance between two centers [m] Returns ------- A_ol: array:float Overlapping area [m^2]",
"> (Rmax -Rmin), d < (Rmin + Rmax)) alpha = np.arccos( (Rmax[index_partialwake]**2.0 +",
"height_ref self.alpha = alpha self.ws_binned = ws_binned self.wd_binned = wd_binned self.z0=z0 self.wt_types =",
"index_up, l_wd] + self.R_list[index_up]) R = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=1) for i_wt",
") A_ol[index_partialwake] = ( alpha*Rmax[index_partialwake]**2 + beta*Rmin[index_partialwake]**2 - 2.0*A_triangle ) return A_ol def",
"coding: utf-8 -*- import numpy as np class FlowField(object): def __init__(self, wf_design, getAkf,",
"of turbines remained the same.\"\"\" self.complete_layout = complete_layout_new def cal_flow_field(self): ###################################################################### # 1.",
"np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=0) for i_wt in range(self.num_wt)], axis=0) A_ol = self.cal_overlapping_area(R_wake, R,",
"self.M_ijl[index_down, index_up, l_wd] = ( (A_ol/self.Ar_list[index_down])**2 / (1 + self.alpha*dist_down/self.R_list[index_up])**4) # calculate N_jk",
"be found in Eq. (A1) of : [Ref] <NAME>, <NAME>, Solving the wind",
"np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=1) for i_wt in range(self.num_wt)], axis=1) R1 = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones(",
"centers [m] Returns ------- A_ol: array:float Overlapping area [m^2] \"\"\" # treat all",
"self.height_ref = height_ref self.alpha = alpha self.ws_binned = ws_binned self.wd_binned = wd_binned self.z0=z0",
"np.sin(rotate_angle) x_rotated = (self.complete_layout[:, 0]*cos_rotate + self.complete_layout[:, 1]*sin_rotate) y_rotated = (self.complete_layout[:, 1]*cos_rotate -",
"N_jk matrix v_jk = self.v_ikl_ideal[:, :, 0] N_jk = np.zeros_like(v_jk) for m_type in",
"y_rotated = (self.complete_layout[:, 1]*cos_rotate - self.complete_layout[:, 0]*sin_rotate) downwind_order = np.argsort(x_rotated) for i_up in",
"for i_up in range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down = x_rotated[index_down]",
"x_rotated[index_up] dist_cross = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2)",
"v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real = self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_flow_field_naive(self): ######################################################################",
"A_triangle = np.sqrt( p[index_partialwake]* (p[index_partialwake]-Rmin[index_partialwake])* (p[index_partialwake]-Rmax[index_partialwake])* (p[index_partialwake]-d[index_partialwake]) ) A_ol[index_partialwake] = ( alpha*Rmax[index_partialwake]**2 +",
"= len(ws_binned) self.num_wd_bin = len(wd_binned) self.ws_bin_size = ws_binned[1] - ws_binned[0] self.wd_bin_size = wd_binned[1]",
"R1: array:float Radius of the first circle [m] R2: array:float Radius of the",
"A_ol = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) ####################################################################### # 3. calculate real wind speed #",
"78 (2015) 182-192 Note that however there are typos in Equation (A1), '2'",
"R1 and R2, centers distanced d. The calculation formula can be found in",
"np.array(R2), np.array(d), A_ol = np.zeros_like(R1) p = (R1 + R2 + d)/2.0 #",
"# -*- coding: utf-8 -*- import numpy as np class FlowField(object): def __init__(self,",
"np.expand_dims(self.ws_binned, axis=0)), axis=-1) self.v_ikl_ideal = np.concatenate([v_ik for l_wd in range(self.num_wd_bin)], axis=-1) ###################################################################### #",
"index_up, l_wd] = ( self.alpha*dist_down[index_down, index_up, l_wd] + self.R_list[index_up]) R = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones(",
"k_il, f_il = self.getAkf(x_il, y_il, wd_il) for k_ws in range(self.num_ws_bin): self.pdf_ikl[:, k_ws, :]",
"range(self.num_wt)], axis=0) A_il, k_il, f_il = self.getAkf(x_il, y_il, wd_il) for k_ws in range(self.num_ws_bin):",
"self.Ar_list = wf_design.Ar_list self.type_list = np.array([int(t) for t in self.complete_layout[:, 3]], dtype='int') self.M_ijl",
"= np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=0) for i_wt in range(self.num_wt)], axis=0) A_ol =",
"= np.concatenate([v_ik for l_wd in range(self.num_wd_bin)], axis=-1) ###################################################################### # 2. calculate pdf of",
"self.complete_layout[:, 1]*sin_rotate) y_rotated = (self.complete_layout[:, 1]*cos_rotate - self.complete_layout[:, 0]*sin_rotate) downwind_order = np.argsort(x_rotated) for",
"m_type N_jk[index_cal, :] = self.wt_types[m_type].get_Ct( v_jk[index_cal, :]) N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real = self.v_ikl_ideal",
"x_rotated = (self.complete_layout[:, 0]*cos_rotate + self.complete_layout[:, 1]*sin_rotate) y_rotated = (self.complete_layout[:, 1]*cos_rotate - self.complete_layout[:,",
"self.M_ijl)) def cal_flow_field_naive(self): ###################################################################### # 1. calculate ideal wind speed v_ik = np.expand_dims(",
"self.num_wd_bin)) self.v_ikl_ideal = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.v_ikl_real = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin))",
"index_fullwake = (d<= (Rmax -Rmin)) A_ol[index_fullwake] = np.pi*Rmin[index_fullwake]**2 # partial wake cases index_partialwake",
"wind speed v_ik = np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1), np.expand_dims(self.ws_binned, axis=0)), axis=-1) self.v_ikl_ideal = np.concatenate([v_ik",
"k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il ####################################################################### # 3. calculate",
"(p[index_partialwake]-Rmax[index_partialwake])* (p[index_partialwake]-d[index_partialwake]) ) A_ol[index_partialwake] = ( alpha*Rmax[index_partialwake]**2 + beta*Rmin[index_partialwake]**2 - 2.0*A_triangle ) return",
"range(self.num_ws_bin): self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il ####################################################################### #",
"beta = np.arccos( (Rmin[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmax[index_partialwake]**2) /(2.0*Rmin[index_partialwake]*d[index_partialwake]) ) A_triangle = np.sqrt(",
"of two circles with radius R1 and R2, centers distanced d. The calculation",
"range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down = x_rotated[index_down] - x_rotated[index_up] dist_cross",
"self.alpha*dist_down + self.R_list[index_up] A_ol = self.cal_overlapping_area(R_wake, self.R_list[index_down], dist_cross) self.M_ijl[index_down, index_up, l_wd] = (",
"R2, centers distanced d. The calculation formula can be found in Eq. (A1)",
"np.array(d), A_ol = np.zeros_like(R1) p = (R1 + R2 + d)/2.0 # make",
"= np.sin(rotate_angle) x_rotated = (self.complete_layout[:, 0]*cos_rotate + self.complete_layout[:, 1]*sin_rotate) y_rotated = (self.complete_layout[:, 1]*cos_rotate",
"= self.cal_overlapping_area(R_wake, self.R_list[index_down], dist_cross) self.M_ijl[index_down, index_up, l_wd] = ( (A_ol/self.Ar_list[index_down])**2 / (1 +",
"found in Eq. (A1) of : [Ref] <NAME>, <NAME>, Solving the wind farm",
"d: array:float Distance between two centers [m] Returns ------- A_ol: array:float Overlapping area",
"np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) self.v_ikl_ideal = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.v_ikl_real = np.zeros( (self.num_wt,",
"N_jk, self.M_ijl)) def cal_flow_field_naive(self): ###################################################################### # 1. calculate ideal wind speed v_ik =",
"R_big >= R_small Rmax = np.where(R1 < R2, R2, R1) Rmin = np.where(R1",
"N_jk, self.M_ijl)) def cal_overlapping_area(self, R1, R2, d): \"\"\" Calculate the overlapping area of",
"Note that however there are typos in Equation (A1), '2' before alpha and",
"two circles with radius R1 and R2, centers distanced d. The calculation formula",
"self.num_wd_bin)) self.v_ikl_real = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.pdf_ikl = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin))",
"self.v_ikl_real = self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_overlapping_area(self, R1, R2,",
"R2 + d)/2.0 # make sure R_big >= R_small Rmax = np.where(R1 <",
"np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=0) for i_wt in range(self.num_wt)], axis=0) A_ol = self.cal_overlapping_area(R_wake,",
"utf-8 -*- import numpy as np class FlowField(object): def __init__(self, wf_design, getAkf, height_ref,",
"12), z0=0.01 ): self.complete_layout = wf_design.complete_layout self.getAkf = getAkf self.height_ref = height_ref self.alpha",
"beta should be 1. Parameters ---------- R1: array:float Radius of the first circle",
"(self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.v_ikl_real = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.pdf_ikl = np.zeros( (self.num_wt,",
"self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il) * f_il ####################################################################### # 3. calculate real wind speed #",
"R1) Rmin = np.where(R1 < R2, R1, R2) # full wake cases index_fullwake",
"= np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) wd_il = np.concatenate([np.expand_dims(self.wd_binned, axis=0)",
"self.v_ikl_real = self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_flow_field_naive(self): ###################################################################### #",
"def wind_shear_log(self, H, H_ref): return np.log(H/self.z0)/np.log(H_ref/self.z0) def change_layout(self, complete_layout_new): \"\"\" Assume only locations",
"speed x_il = np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) y_il =",
"speed v_ik = np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1), np.expand_dims(self.ws_binned, axis=0)), axis=-1) self.v_ikl_ideal = np.concatenate([v_ik for",
"= ( alpha*Rmax[index_partialwake]**2 + beta*Rmin[index_partialwake]**2 - 2.0*A_triangle ) return A_ol def cal_pdf_Weibull(self, v,",
"R_wake = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) A_ol = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) ####################################################################### # 3.",
"+ self.R_list[index_up]) R = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=1) for i_wt in range(self.num_wt)],",
"turbines remained the same.\"\"\" self.complete_layout = complete_layout_new def cal_flow_field(self): ###################################################################### # 1. calculate",
"= np.pi*Rmin[index_fullwake]**2 # partial wake cases index_partialwake = np.logical_and(d > (Rmax -Rmin), d",
"def __init__(self, wf_design, getAkf, height_ref, alpha=0.04, ws_binned=np.linspace(1, 30, 30), wd_binned=np.linspace(0, 330, 12), z0=0.01",
"-*- coding: utf-8 -*- import numpy as np class FlowField(object): def __init__(self, wf_design,",
"# 1. calculate ideal wind speed v_ik = np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1), np.expand_dims(self.ws_binned, axis=0)),",
"= height_ref self.alpha = alpha self.ws_binned = ws_binned self.wd_binned = wd_binned self.z0=z0 self.wt_types",
"wd_il) for k_ws in range(self.num_ws_bin): self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il), A_il, k_il)",
"k_il) * f_il ####################################################################### # 3. calculate real wind speed # calculate M_ijl",
"i_wt in range(self.num_wt)], axis=0) A_ol = self.cal_overlapping_area(R_wake, R, dist_cross) self.M_ijl = np.where(dist_down>0, (A_ol/(np.pi*R**2))**2",
"# full wake cases index_fullwake = (d<= (Rmax -Rmin)) A_ol[index_fullwake] = np.pi*Rmin[index_fullwake]**2 #",
"3. calculate real wind speed # calculate M_ijl matrix for l_wd in range(self.num_wd_bin):",
"= self.cal_overlapping_area(R_wake, R, dist_cross) self.M_ijl = np.where(dist_down>0, (A_ol/(np.pi*R**2))**2 / (1 + self.alpha*dist_down/R1)**4, 0)",
"self.v_ikl_ideal[:, :, 0] N_jk = np.zeros_like(v_jk) for m_type in set(self.type_list): index_cal = self.type_list",
"= np.zeros_like(v_jk) for m_type in set(self.type_list): index_cal = self.type_list == m_type N_jk[index_cal, :]",
"= np.array(R1), np.array(R2), np.array(d), A_ol = np.zeros_like(R1) p = (R1 + R2 +",
"x_il = np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) y_il = np.concatenate([np.expand_dims(self.complete_layout[:,",
"Rmin = np.where(R1 < R2, R1, R2) # full wake cases index_fullwake =",
"Reneable Energy 78 (2015) 182-192 Note that however there are typos in Equation",
"input as array R1, R2, d = np.array(R1), np.array(R2), np.array(d), A_ol = np.zeros_like(R1)",
"alpha = np.arccos( (Rmax[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmin[index_partialwake]**2) /(2.0*Rmax[index_partialwake]*d[index_partialwake]) ) beta = np.arccos(",
"- np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_flow_field_naive(self): ###################################################################### # 1. calculate ideal",
"first circle [m] R2: array:float Radius of the second circle [m] d: array:float",
"ijl->ikl', N_jk, self.M_ijl)) def cal_flow_field_naive(self): ###################################################################### # 1. calculate ideal wind speed v_ik",
"np.concatenate([v_ik for l_wd in range(self.num_wd_bin)], axis=-1) ###################################################################### # 2. calculate pdf of local",
"( x_rotated[index_down] - x_rotated[index_up]) dist_cross[index_down, index_up, l_wd] = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 +",
"area of two circles with radius R1 and R2, centers distanced d. The",
"treat all input as array R1, R2, d = np.array(R1), np.array(R2), np.array(d), A_ol",
"turbines changed, and number, hub-height and types of turbines remained the same.\"\"\" self.complete_layout",
"dist_cross[index_down, index_up, l_wd] = np.sqrt( (y_rotated[index_down] - y_rotated[index_up])**2 + (self.complete_layout[index_down, 2] - self.complete_layout[index_up,",
"np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.v_ikl_real = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.pdf_ikl = np.zeros(",
"3]], dtype='int') self.M_ijl = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) self.v_ikl_ideal = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin))",
"= np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) A_ol = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) ####################################################################### # 3. calculate",
"< R2, R1, R2) # full wake cases index_fullwake = (d<= (Rmax -Rmin))",
"-Rmin), d < (Rmin + Rmax)) alpha = np.arccos( (Rmax[index_partialwake]**2.0 + d[index_partialwake]**2 -",
"y_il = np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1) wd_il = np.concatenate([np.expand_dims(self.wd_binned,",
"= np.zeros_like(R1) p = (R1 + R2 + d)/2.0 # make sure R_big",
"Overlapping area [m^2] \"\"\" # treat all input as array R1, R2, d",
"30), wd_binned=np.linspace(0, 330, 12), z0=0.01 ): self.complete_layout = wf_design.complete_layout self.getAkf = getAkf self.height_ref",
"self.v_ikl_ideal = np.concatenate([v_ik for l_wd in range(self.num_wd_bin)], axis=-1) ###################################################################### # 2. calculate pdf",
"axis=0) for i_wt in range(self.num_wt)], axis=0) A_ol = self.cal_overlapping_area(R_wake, R, dist_cross) self.M_ijl =",
"+ self.alpha*dist_down/R1)**4, 0) # calculate N_jk matrix v_jk = self.v_ikl_ideal[:, :, 0] N_jk",
"(1 + self.alpha*dist_down/self.R_list[index_up])**4) # calculate N_jk matrix v_jk = self.v_ikl_ideal[:, :, 0] N_jk",
"matrix for l_wd in range(self.num_wd_bin): rotate_angle = (270 - self.wd_binned[l_wd])*np.pi/180.0 cos_rotate = np.cos(rotate_angle)",
"self.wind_shear_multi = self.wind_shear_log(self.complete_layout[:, 2], height_ref) self.num_wt = wf_design.num_wt self.num_ws_bin = len(ws_binned) self.num_wd_bin =",
"- np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_overlapping_area(self, R1, R2, d): \"\"\" Calculate",
"2])**2) R_wake[index_down, index_up, l_wd] = ( self.alpha*dist_down[index_down, index_up, l_wd] + self.R_list[index_up]) R =",
"< R2, R2, R1) Rmin = np.where(R1 < R2, R1, R2) # full",
"(self.complete_layout[:, 1]*cos_rotate - self.complete_layout[:, 0]*sin_rotate) downwind_order = np.argsort(x_rotated) for i_up in range(self.num_wt-1): index_up",
"+ self.complete_layout[:, 1]*sin_rotate) y_rotated = (self.complete_layout[:, 1]*cos_rotate - self.complete_layout[:, 0]*sin_rotate) downwind_order = np.argsort(x_rotated)",
"self.v_ikl_real = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.pdf_ikl = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) def",
"# 3. calculate real wind speed # calculate M_ijl matrix for l_wd in",
"index_up, l_wd] = ( x_rotated[index_down] - x_rotated[index_up]) dist_cross[index_down, index_up, l_wd] = np.sqrt( (y_rotated[index_down]",
"= np.array([int(t) for t in self.complete_layout[:, 3]], dtype='int') self.M_ijl = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin))",
"self.v_ikl_ideal = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.v_ikl_real = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.pdf_ikl",
"complete_layout_new): \"\"\" Assume only locations of turbines changed, and number, hub-height and types",
"l_wd] = ( (A_ol/self.Ar_list[index_down])**2 / (1 + self.alpha*dist_down/self.R_list[index_up])**4) # calculate N_jk matrix v_jk",
"( alpha*Rmax[index_partialwake]**2 + beta*Rmin[index_partialwake]**2 - 2.0*A_triangle ) return A_ol def cal_pdf_Weibull(self, v, A,",
"Radius of the second circle [m] d: array:float Distance between two centers [m]",
"dist_down[index_down, index_up, l_wd] = ( x_rotated[index_down] - x_rotated[index_up]) dist_cross[index_down, index_up, l_wd] = np.sqrt(",
"(self.complete_layout[index_down, 2] - self.complete_layout[index_up, 2])**2) R_wake[index_down, index_up, l_wd] = ( self.alpha*dist_down[index_down, index_up, l_wd]",
"= np.cos(rotate_angle) sin_rotate = np.sin(rotate_angle) x_rotated = (self.complete_layout[:, 0]*cos_rotate + self.complete_layout[:, 1]*sin_rotate) y_rotated",
"= np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.pdf_ikl = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) def wind_shear_log(self,",
"Rmax)) alpha = np.arccos( (Rmax[index_partialwake]**2.0 + d[index_partialwake]**2 - Rmin[index_partialwake]**2) /(2.0*Rmax[index_partialwake]*d[index_partialwake]) ) beta =",
"self.getAkf(x_il, y_il, wd_il) for k_ws in range(self.num_ws_bin): self.pdf_ikl[:, k_ws, :] = self.cal_pdf_Weibull( self.ws_binned[k_ws]*np.ones_like(A_il),",
"= self.type_list == m_type N_jk[index_cal, :] = self.wt_types[m_type].get_Ct( v_jk[index_cal, :]) N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2",
"return A_ol def cal_pdf_Weibull(self, v, A, k): return ((k / A) * (v",
"in range(self.num_wd_bin)], axis=-1) y_il = np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1) for l_wd in range(self.num_wd_bin)], axis=-1)",
"Returns ------- A_ol: array:float Overlapping area [m^2] \"\"\" # treat all input as",
"l_wd] + self.R_list[index_up]) R = np.concatenate([ np.expand_dims(self.R_list[i_wt]*np.ones( (self.num_wt, self.num_wd_bin)), axis=1) for i_wt in",
"for i_up in range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down[index_down, index_up, l_wd]",
"p = (R1 + R2 + d)/2.0 # make sure R_big >= R_small",
"wind farm layout optimization problem using Random search algorithm, Reneable Energy 78 (2015)",
"self.complete_layout[:, 3]], dtype='int') self.M_ijl = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) self.v_ikl_ideal = np.zeros( (self.num_wt, self.num_ws_bin,",
"R1, R2, d): \"\"\" Calculate the overlapping area of two circles with radius",
"wf_design.num_wt self.num_ws_bin = len(ws_binned) self.num_wd_bin = len(wd_binned) self.ws_bin_size = ws_binned[1] - ws_binned[0] self.wd_bin_size",
"make sure R_big >= R_small Rmax = np.where(R1 < R2, R2, R1) Rmin",
"= self.wind_shear_log(self.complete_layout[:, 2], height_ref) self.num_wt = wf_design.num_wt self.num_ws_bin = len(ws_binned) self.num_wd_bin = len(wd_binned)",
"= complete_layout_new def cal_flow_field(self): ###################################################################### # 1. calculate ideal wind speed v_ik =",
"axis=-1) self.v_ikl_ideal = np.concatenate([v_ik for l_wd in range(self.num_wd_bin)], axis=-1) ###################################################################### # 2. calculate",
"and R2, centers distanced d. The calculation formula can be found in Eq.",
"axis=-1), np.expand_dims(self.ws_binned, axis=0)), axis=-1) self.v_ikl_ideal = np.concatenate([v_ik for l_wd in range(self.num_wd_bin)], axis=-1) ######################################################################",
"== m_type N_jk[index_cal, :] = self.wt_types[m_type].get_Ct( v_jk[index_cal, :]) N_jk = v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real =",
">= R_small Rmax = np.where(R1 < R2, R2, R1) Rmin = np.where(R1 <",
"(A1), '2' before alpha and beta should be 1. Parameters ---------- R1: array:float",
"= v_jk**2*(1-np.sqrt(1-N_jk))**2 self.v_ikl_real = self.v_ikl_ideal - np.sqrt( np.einsum('jk, ijl->ikl', N_jk, self.M_ijl)) def cal_overlapping_area(self,",
"calculate real wind speed # calculate M_ijl matrix for l_wd in range(self.num_wd_bin): rotate_angle",
"= wf_design.num_wt self.num_ws_bin = len(ws_binned) self.num_wd_bin = len(wd_binned) self.ws_bin_size = ws_binned[1] - ws_binned[0]",
"R_wake[index_down, index_up, l_wd] = ( self.alpha*dist_down[index_down, index_up, l_wd] + self.R_list[index_up]) R = np.concatenate([",
"calculation formula can be found in Eq. (A1) of : [Ref] <NAME>, <NAME>,",
"overlapping area of two circles with radius R1 and R2, centers distanced d.",
"wd_binned=np.linspace(0, 330, 12), z0=0.01 ): self.complete_layout = wf_design.complete_layout self.getAkf = getAkf self.height_ref =",
"- Rmax[index_partialwake]**2) /(2.0*Rmin[index_partialwake]*d[index_partialwake]) ) A_triangle = np.sqrt( p[index_partialwake]* (p[index_partialwake]-Rmin[index_partialwake])* (p[index_partialwake]-Rmax[index_partialwake])* (p[index_partialwake]-d[index_partialwake]) ) A_ol[index_partialwake]",
"/ A) * (v / A) ** (k - 1) * np.exp(-(v /",
"= (d<= (Rmax -Rmin)) A_ol[index_fullwake] = np.pi*Rmin[index_fullwake]**2 # partial wake cases index_partialwake =",
"for l_wd in range(self.num_wd_bin): rotate_angle = (270 - self.wd_binned[l_wd])*np.pi/180.0 cos_rotate = np.cos(rotate_angle) sin_rotate",
"ws_binned[0] self.wd_bin_size = wd_binned[1] - wd_binned[0] self.R_list = wf_design.D_list/2 self.Ar_list = wf_design.Ar_list self.type_list",
"0]*cos_rotate + self.complete_layout[:, 1]*sin_rotate) y_rotated = (self.complete_layout[:, 1]*cos_rotate - self.complete_layout[:, 0]*sin_rotate) downwind_order =",
"self.wd_bin_size = wd_binned[1] - wd_binned[0] self.R_list = wf_design.D_list/2 self.Ar_list = wf_design.Ar_list self.type_list =",
"beta*Rmin[index_partialwake]**2 - 2.0*A_triangle ) return A_ol def cal_pdf_Weibull(self, v, A, k): return ((k",
"= np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1), np.expand_dims(self.ws_binned, axis=0)), axis=-1) self.v_ikl_ideal = np.concatenate([v_ik for l_wd in",
"self.pdf_ikl = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) def wind_shear_log(self, H, H_ref): return np.log(H/self.z0)/np.log(H_ref/self.z0) def",
"self.R_list[index_up] A_ol = self.cal_overlapping_area(R_wake, self.R_list[index_down], dist_cross) self.M_ijl[index_down, index_up, l_wd] = ( (A_ol/self.Ar_list[index_down])**2 /",
"np.where(R1 < R2, R2, R1) Rmin = np.where(R1 < R2, R1, R2) #",
"axis=0) A_ol = self.cal_overlapping_area(R_wake, R, dist_cross) self.M_ijl = np.where(dist_down>0, (A_ol/(np.pi*R**2))**2 / (1 +",
"H_ref): return np.log(H/self.z0)/np.log(H_ref/self.z0) def change_layout(self, complete_layout_new): \"\"\" Assume only locations of turbines changed,",
"optimization problem using Random search algorithm, Reneable Energy 78 (2015) 182-192 Note that",
"calculate M_ijl matrix for l_wd in range(self.num_wd_bin): rotate_angle = (270 - self.wd_binned[l_wd])*np.pi/180.0 cos_rotate",
"f_il dist_down = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) dist_cross = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) R_wake =",
"N_jk = np.zeros_like(v_jk) for m_type in set(self.type_list): index_cal = self.type_list == m_type N_jk[index_cal,",
"speed # calculate M_ijl matrix for l_wd in range(self.num_wd_bin): rotate_angle = (270 -",
"self.num_wt, self.num_wd_bin)) R_wake = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) A_ol = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) #######################################################################",
"(R1 + R2 + d)/2.0 # make sure R_big >= R_small Rmax =",
"for l_wd in range(self.num_wd_bin)], axis=-1) y_il = np.concatenate([np.expand_dims(self.complete_layout[:, 1], axis=-1) for l_wd in",
"R2, R1) Rmin = np.where(R1 < R2, R1, R2) # full wake cases",
"= np.logical_and(d > (Rmax -Rmin), d < (Rmin + Rmax)) alpha = np.arccos(",
"d)/2.0 # make sure R_big >= R_small Rmax = np.where(R1 < R2, R2,",
"l_wd in range(self.num_wd_bin)], axis=-1) wd_il = np.concatenate([np.expand_dims(self.wd_binned, axis=0) for l_wt in range(self.num_wt)], axis=0)",
"wf_design.D_list/2 self.Ar_list = wf_design.Ar_list self.type_list = np.array([int(t) for t in self.complete_layout[:, 3]], dtype='int')",
"l_wd] = ( x_rotated[index_down] - x_rotated[index_up]) dist_cross[index_down, index_up, l_wd] = np.sqrt( (y_rotated[index_down] -",
"self.type_list = np.array([int(t) for t in self.complete_layout[:, 3]], dtype='int') self.M_ijl = np.zeros((self.num_wt, self.num_wt,",
"complete_layout_new def cal_flow_field(self): ###################################################################### # 1. calculate ideal wind speed v_ik = np.expand_dims(",
"np.cos(rotate_angle) sin_rotate = np.sin(rotate_angle) x_rotated = (self.complete_layout[:, 0]*cos_rotate + self.complete_layout[:, 1]*sin_rotate) y_rotated =",
"= np.where(R1 < R2, R2, R1) Rmin = np.where(R1 < R2, R1, R2)",
"(d<= (Rmax -Rmin)) A_ol[index_fullwake] = np.pi*Rmin[index_fullwake]**2 # partial wake cases index_partialwake = np.logical_and(d",
"wake cases index_fullwake = (d<= (Rmax -Rmin)) A_ol[index_fullwake] = np.pi*Rmin[index_fullwake]**2 # partial wake",
"30, 30), wd_binned=np.linspace(0, 330, 12), z0=0.01 ): self.complete_layout = wf_design.complete_layout self.getAkf = getAkf",
"= np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.v_ikl_real = np.zeros( (self.num_wt, self.num_ws_bin, self.num_wd_bin)) self.pdf_ikl =",
"A_il, k_il) * f_il dist_down = np.zeros((self.num_wt, self.num_wt, self.num_wd_bin)) dist_cross = np.zeros((self.num_wt, self.num_wt,",
"layout optimization problem using Random search algorithm, Reneable Energy 78 (2015) 182-192 Note",
"0) # calculate N_jk matrix v_jk = self.v_ikl_ideal[:, :, 0] N_jk = np.zeros_like(v_jk)",
"cal_flow_field_naive(self): ###################################################################### # 1. calculate ideal wind speed v_ik = np.expand_dims( np.matmul(np.expand_dims(self.wind_shear_multi, axis=-1),",
"np.argsort(x_rotated) for i_up in range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down =",
"changed, and number, hub-height and types of turbines remained the same.\"\"\" self.complete_layout =",
"l_wt in range(self.num_wt)], axis=0) A_il, k_il, f_il = self.getAkf(x_il, y_il, wd_il) for k_ws",
"in range(self.num_wt-1): index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down = x_rotated[index_down] - x_rotated[index_up]",
"index_down = downwind_order[i_up+1:] dist_down[index_down, index_up, l_wd] = ( x_rotated[index_down] - x_rotated[index_up]) dist_cross[index_down, index_up,",
"- 2.0*A_triangle ) return A_ol def cal_pdf_Weibull(self, v, A, k): return ((k /",
"local ideal wind speed x_il = np.concatenate([np.expand_dims(self.complete_layout[:, 0], axis=-1) for l_wd in range(self.num_wd_bin)],",
"axis=0) A_il, k_il, f_il = self.getAkf(x_il, y_il, wd_il) for k_ws in range(self.num_ws_bin): self.pdf_ikl[:,",
"index_up = downwind_order[i_up] index_down = downwind_order[i_up+1:] dist_down = x_rotated[index_down] - x_rotated[index_up] dist_cross =",
"sin_rotate = np.sin(rotate_angle) x_rotated = (self.complete_layout[:, 0]*cos_rotate + self.complete_layout[:, 1]*sin_rotate) y_rotated = (self.complete_layout[:,"
] |
[
"= 0, 1 while a < n: print(a, end=' ') a, b =",
"# return Fibonacci series up to n result = [] a, b =",
"b print() def fib2(n): # return Fibonacci series up to n result =",
"a < n: print(a, end=' ') a, b = b, a + b",
"= [] a, b = 0, 1 while a < n: result.append(a) a,",
"b, a + b print() def fib2(n): # return Fibonacci series up to",
"fib(n): # write Fibonacci series up to n a, b = 0, 1",
"print() def fib2(n): # return Fibonacci series up to n result = []",
"series up to n a, b = 0, 1 while a < n:",
"1 while a < n: result.append(a) a, b = b, a + b",
"b, a + b return result def num(n1, n2): n3=random.randint(n1, n2) return n3",
"< n: print(a, end=' ') a, b = b, a + b print()",
"n\"\"\" def fib(n): # write Fibonacci series up to n a, b =",
"n result = [] a, b = 0, 1 while a < n:",
"n: print(a, end=' ') a, b = b, a + b print() def",
"a, b = b, a + b print() def fib2(n): # return Fibonacci",
"result = [] a, b = 0, 1 while a < n: result.append(a)",
"# write Fibonacci series up to n a, b = 0, 1 while",
"n: result.append(a) a, b = b, a + b return result def num(n1,",
"while a < n: print(a, end=' ') a, b = b, a +",
"') a, b = b, a + b print() def fib2(n): # return",
"up to n result = [] a, b = 0, 1 while a",
"\"\"\" Fibonancci series up to n\"\"\" def fib(n): # write Fibonacci series up",
"Fibonacci series up to n a, b = 0, 1 while a <",
"def fib2(n): # return Fibonacci series up to n result = [] a,",
"a, b = 0, 1 while a < n: result.append(a) a, b =",
"a, b = 0, 1 while a < n: print(a, end=' ') a,",
"up to n a, b = 0, 1 while a < n: print(a,",
"to n a, b = 0, 1 while a < n: print(a, end='",
"< n: result.append(a) a, b = b, a + b return result def",
"a, b = b, a + b return result def num(n1, n2): n3=random.randint(n1,",
"series up to n result = [] a, b = 0, 1 while",
"to n result = [] a, b = 0, 1 while a <",
"fib2(n): # return Fibonacci series up to n result = [] a, b",
"print(a, end=' ') a, b = b, a + b print() def fib2(n):",
"b = 0, 1 while a < n: print(a, end=' ') a, b",
"a < n: result.append(a) a, b = b, a + b return result",
"while a < n: result.append(a) a, b = b, a + b return",
"= b, a + b print() def fib2(n): # return Fibonacci series up",
"Fibonacci series up to n result = [] a, b = 0, 1",
"write Fibonacci series up to n a, b = 0, 1 while a",
"b = 0, 1 while a < n: result.append(a) a, b = b,",
"def fib(n): # write Fibonacci series up to n a, b = 0,",
"up to n\"\"\" def fib(n): # write Fibonacci series up to n a,",
"+ b print() def fib2(n): # return Fibonacci series up to n result",
"b = b, a + b print() def fib2(n): # return Fibonacci series",
"Fibonancci series up to n\"\"\" def fib(n): # write Fibonacci series up to",
"series up to n\"\"\" def fib(n): # write Fibonacci series up to n",
"end=' ') a, b = b, a + b print() def fib2(n): #",
"a + b print() def fib2(n): # return Fibonacci series up to n",
"[] a, b = 0, 1 while a < n: result.append(a) a, b",
"0, 1 while a < n: result.append(a) a, b = b, a +",
"0, 1 while a < n: print(a, end=' ') a, b = b,",
"b = b, a + b return result def num(n1, n2): n3=random.randint(n1, n2)",
"= 0, 1 while a < n: result.append(a) a, b = b, a",
"result.append(a) a, b = b, a + b return result def num(n1, n2):",
"n a, b = 0, 1 while a < n: print(a, end=' ')",
"return Fibonacci series up to n result = [] a, b = 0,",
"to n\"\"\" def fib(n): # write Fibonacci series up to n a, b",
"1 while a < n: print(a, end=' ') a, b = b, a",
"= b, a + b return result def num(n1, n2): n3=random.randint(n1, n2) return"
] |
[
"namedtuple(\"Person\", [\"name\", \"age\", \"gender\"]) people = [ Person(\"Andy\", 30, \"m\"), Person(\"Ping\", 1, \"m\"),",
"them here, but that would end the string! I know! I'll use a",
"= -3.5 my_fraction = 1/2 # what do you think THIS line of",
"type called a namedtuple which is similar to a struct data type in",
"a slash as an escape character. Triple quotes look like this: \\\"\\\"\\\" Sincerely,",
"BOOLEANS print(\"NUMBERS\") my_float = 0.5 my_integer = 7 my_negative = -3.5 my_fraction =",
"@Date : 2017-10-14 19:45:05 # @Author : jingray (<EMAIL>) # @Link : http://www.jianshu.com/u/01fb0364467d",
"\"\\n\" print(my_string_1, my_string_2) print(my_multiline_string) print(newline_character) print(\"-----------\") print(newline_character) # NUMBERS AND BOOLEANS print(\"NUMBERS\") my_float",
"is equal to\", my_integer ** 2) print(\"Does\", my_fraction, \"equal\", my_float, \"?\", does_half_equal_point_five) for",
"rd a = rd.random() b = rd.random() c = rd.random() print(\"a is\", a)",
"[x for x in range(10) if x % 2 == 1] print(\"Odds \",",
"do you think THIS line of code will assign to the variable #",
"Person(\"Ping\", 1, \"m\"), Person(\"Tina\", 32, \"f\"), Person(\"Abby\", 14, \"f\"), Person(\"Adah\", 13, \"f\"), Person(\"Sebastian\",",
"\", squares) odds = [x for x in range(10) if x % 2",
"names:\", male_names) teen_names = [p.name for p in people if 13 <= p.age",
"my_float = 0.5 my_integer = 7 my_negative = -3.5 my_fraction = 1/2 #",
"= \"hello\" my_string_2 = 'world' my_multiline_string = \"\"\" Dear World, Hello. I am",
"x in range(10)] print(\"Squares \", squares) odds = [x for x in range(10)",
"languages. from collections import namedtuple Person = namedtuple(\"Person\", [\"name\", \"age\", \"gender\"]) people =",
"slash as an escape character. Triple quotes look like this: \\\"\\\"\\\" Sincerely, Python",
"NUMBERS AND BOOLEANS print(\"NUMBERS\") my_float = 0.5 my_integer = 7 my_negative = -3.5",
"print(newline_character) print(\"-----------\") print(newline_character) # NUMBERS AND BOOLEANS print(\"NUMBERS\") my_float = 0.5 my_integer =",
"for x in range(10)] print(\"Squares \", squares) odds = [x for x in",
"# NUMBERS AND BOOLEANS print(\"NUMBERS\") my_float = 0.5 my_integer = 7 my_negative =",
": jingray (<EMAIL>) # @Link : http://www.jianshu.com/u/01fb0364467d # @Version : $Id$ import os",
"print(\"STRINGS\") my_string_1 = \"hello\" my_string_2 = 'world' my_multiline_string = \"\"\" Dear World, Hello.",
"a list...\") for i, item in enumerate(my_list): print(\"item number\", i, \"is\", item) print(\"Another",
"in triple quotes. I'd write them here, but that would end the string!",
"for x in range(10)] print(\"Numbers 0 to 9\", numbers_0_to_9) squares = [x *",
"people = [ Person(\"Andy\", 30, \"m\"), Person(\"Ping\", 1, \"m\"), Person(\"Tina\", 32, \"f\"), Person(\"Abby\",",
"= 'world' my_multiline_string = \"\"\" Dear World, Hello. I am a multiline python",
"my_fraction = 1/2 # what do you think THIS line of code will",
"if 13 <= p.age <= 18 ] print(\"Teen names:\", teen_names) # random import",
"index = my_list.index(item) print(\"item\", item, \"has index\", index) #List Comprehensions numbers_0_to_9 = [x",
"# random import random as rd a = rd.random() b = rd.random() c",
"does_half_equal_point_five? does_half_equal_point_five = (my_fraction == my_float) print(\"The absolute value of\", my_negative, \"is\", abs(my_negative))",
"print(\"-----------\") print(newline_character) # NUMBERS AND BOOLEANS print(\"NUMBERS\") my_float = 0.5 my_integer = 7",
"triple quotes. I'd write them here, but that would end the string! I",
"\\\"\\\"\\\" Sincerely, Python \"\"\" newline_character = \"\\n\" print(my_string_1, my_string_2) print(my_multiline_string) print(newline_character) print(\"-----------\") print(newline_character)",
"** 2) print(\"Does\", my_fraction, \"equal\", my_float, \"?\", does_half_equal_point_five) for left_num in range(10): for",
"my_list = [1, 2, 3, \"a\", \"b\", \"c\"] print(\"my_list is:\", my_list) print(\"Enumerating a",
"my_list) print(\"Enumerating a list...\") for i, item in enumerate(my_list): print(\"item number\", i, \"is\",",
"[x for x in range(10)] print(\"Numbers 0 to 9\", numbers_0_to_9) squares = [x",
"a data type called a namedtuple which is similar to a struct data",
"] # first, let's show how this namedtuple works. andy = people[0] print(\"name:",
"% 2 == 1] print(\"Odds \", odds) # This example uses a data",
"import namedtuple Person = namedtuple(\"Person\", [\"name\", \"age\", \"gender\"]) people = [ Person(\"Andy\", 30,",
"1/2 # what do you think THIS line of code will assign to",
"to\", my_integer ** 2) print(\"Does\", my_fraction, \"equal\", my_float, \"?\", does_half_equal_point_five) for left_num in",
"to a struct data type in other languages. from collections import namedtuple Person",
"from collections import namedtuple Person = namedtuple(\"Person\", [\"name\", \"age\", \"gender\"]) people = [",
"list comprehension # male_names = [person.name for person in people if person.gender==\"m\"] print(\"Male",
"== 1] print(\"Odds \", odds) # This example uses a data type called",
"struct data type in other languages. from collections import namedtuple Person = namedtuple(\"Person\",",
"other languages. from collections import namedtuple Person = namedtuple(\"Person\", [\"name\", \"age\", \"gender\"]) people",
"#List Comprehensions numbers_0_to_9 = [x for x in range(10)] print(\"Numbers 0 to 9\",",
"x in range(10)] print(\"Numbers 0 to 9\", numbers_0_to_9) squares = [x * x",
"2 == 1] print(\"Odds \", odds) # This example uses a data type",
"19:45:05 # @Author : jingray (<EMAIL>) # @Link : http://www.jianshu.com/u/01fb0364467d # @Version :",
"(<EMAIL>) # @Link : http://www.jianshu.com/u/01fb0364467d # @Version : $Id$ import os # STRINGS",
"0 to 9\", numbers_0_to_9) squares = [x * x for x in range(10)]",
"print(\"name: \", andy.name) print(\"age: \", andy.age) print(\"gender:\", andy.gender) # now let's show what",
"2, 3, \"a\", \"b\", \"c\"] print(\"my_list is:\", my_list) print(\"Enumerating a list...\") for i,",
"numbers_0_to_9) squares = [x * x for x in range(10)] print(\"Squares \", squares)",
"print(newline_character) # NUMBERS AND BOOLEANS print(\"NUMBERS\") my_float = 0.5 my_integer = 7 my_negative",
"item) print(\"Another way to enumerate using a list 'method'...\") for item in my_list:",
"1, \"m\"), Person(\"Tina\", 32, \"f\"), Person(\"Abby\", 14, \"f\"), Person(\"Adah\", 13, \"f\"), Person(\"Sebastian\", 42,",
"30, \"m\"), Person(\"Ping\", 1, \"m\"), Person(\"Tina\", 32, \"f\"), Person(\"Abby\", 14, \"f\"), Person(\"Adah\", 13,",
"product) print (\"\\n\") #List my_list = [1, 2, 3, \"a\", \"b\", \"c\"] print(\"my_list",
"in my_list: index = my_list.index(item) print(\"item\", item, \"has index\", index) #List Comprehensions numbers_0_to_9",
"This example uses a data type called a namedtuple which is similar to",
"for right_num in range(10): product = left_num * right_num print(left_num, \"x\", right_num, \"=\",",
"b = rd.random() c = rd.random() print(\"a is\", a) print(\"b is\", b) print(\"c",
"\"gender\"]) people = [ Person(\"Andy\", 30, \"m\"), Person(\"Ping\", 1, \"m\"), Person(\"Tina\", 32, \"f\"),",
"(my_fraction == my_float) print(\"The absolute value of\", my_negative, \"is\", abs(my_negative)) print(my_integer, \"squared is",
"x for x in range(10)] print(\"Squares \", squares) odds = [x for x",
"import random as rd a = rd.random() b = rd.random() c = rd.random()",
"[1, 2, 3, \"a\", \"b\", \"c\"] print(\"my_list is:\", my_list) print(\"Enumerating a list...\") for",
"enclosed in triple quotes. I'd write them here, but that would end the",
"equal to\", my_integer ** 2) print(\"Does\", my_fraction, \"equal\", my_float, \"?\", does_half_equal_point_five) for left_num",
"random as rd a = rd.random() b = rd.random() c = rd.random() print(\"a",
"odds = [x for x in range(10) if x % 2 == 1]",
"[\"name\", \"age\", \"gender\"]) people = [ Person(\"Andy\", 30, \"m\"), Person(\"Ping\", 1, \"m\"), Person(\"Tina\",",
"andy.gender) # now let's show what we can do with a list comprehension",
"assign to the variable # does_half_equal_point_five? does_half_equal_point_five = (my_fraction == my_float) print(\"The absolute",
"namedtuple works. andy = people[0] print(\"name: \", andy.name) print(\"age: \", andy.age) print(\"gender:\", andy.gender)",
"= left_num * right_num print(left_num, \"x\", right_num, \"=\", product) print (\"\\n\") #List my_list",
"string! I know! I'll use a slash as an escape character. Triple quotes",
"an escape character. Triple quotes look like this: \\\"\\\"\\\" Sincerely, Python \"\"\" newline_character",
"do with a list comprehension # male_names = [person.name for person in people",
"print(my_multiline_string) print(newline_character) print(\"-----------\") print(newline_character) # NUMBERS AND BOOLEANS print(\"NUMBERS\") my_float = 0.5 my_integer",
"does_half_equal_point_five) for left_num in range(10): for right_num in range(10): product = left_num *",
": $Id$ import os # STRINGS print(\"STRINGS\") my_string_1 = \"hello\" my_string_2 = 'world'",
"if x % 2 == 1] print(\"Odds \", odds) # This example uses",
"my_multiline_string = \"\"\" Dear World, Hello. I am a multiline python string. I'm",
"Triple quotes look like this: \\\"\\\"\\\" Sincerely, Python \"\"\" newline_character = \"\\n\" print(my_string_1,",
"quotes look like this: \\\"\\\"\\\" Sincerely, Python \"\"\" newline_character = \"\\n\" print(my_string_1, my_string_2)",
"\", andy.age) print(\"gender:\", andy.gender) # now let's show what we can do with",
"import os # STRINGS print(\"STRINGS\") my_string_1 = \"hello\" my_string_2 = 'world' my_multiline_string =",
"teen_names = [p.name for p in people if 13 <= p.age <= 18",
"to the variable # does_half_equal_point_five? does_half_equal_point_five = (my_fraction == my_float) print(\"The absolute value",
"a namedtuple which is similar to a struct data type in other languages.",
"print(my_integer, \"squared is equal to\", my_integer ** 2) print(\"Does\", my_fraction, \"equal\", my_float, \"?\",",
"right_num, \"=\", product) print (\"\\n\") #List my_list = [1, 2, 3, \"a\", \"b\",",
"squares = [x * x for x in range(10)] print(\"Squares \", squares) odds",
"people if person.gender==\"m\"] print(\"Male names:\", male_names) teen_names = [p.name for p in people",
"= [x for x in range(10) if x % 2 == 1] print(\"Odds",
"my_list.index(item) print(\"item\", item, \"has index\", index) #List Comprehensions numbers_0_to_9 = [x for x",
"my_list: index = my_list.index(item) print(\"item\", item, \"has index\", index) #List Comprehensions numbers_0_to_9 =",
"print(\"Another way to enumerate using a list 'method'...\") for item in my_list: index",
"newline_character = \"\\n\" print(my_string_1, my_string_2) print(my_multiline_string) print(newline_character) print(\"-----------\") print(newline_character) # NUMBERS AND BOOLEANS",
"number\", i, \"is\", item) print(\"Another way to enumerate using a list 'method'...\") for",
"person.gender==\"m\"] print(\"Male names:\", male_names) teen_names = [p.name for p in people if 13",
"#List my_list = [1, 2, 3, \"a\", \"b\", \"c\"] print(\"my_list is:\", my_list) print(\"Enumerating",
"is:\", my_list) print(\"Enumerating a list...\") for i, item in enumerate(my_list): print(\"item number\", i,",
"to 9\", numbers_0_to_9) squares = [x * x for x in range(10)] print(\"Squares",
"let's show how this namedtuple works. andy = people[0] print(\"name: \", andy.name) print(\"age:",
"collections import namedtuple Person = namedtuple(\"Person\", [\"name\", \"age\", \"gender\"]) people = [ Person(\"Andy\",",
"= namedtuple(\"Person\", [\"name\", \"age\", \"gender\"]) people = [ Person(\"Andy\", 30, \"m\"), Person(\"Ping\", 1,",
"absolute value of\", my_negative, \"is\", abs(my_negative)) print(my_integer, \"squared is equal to\", my_integer **",
"42, \"m\"), Person(\"Carol\" , 68, \"f\"), ] # first, let's show how this",
"= (my_fraction == my_float) print(\"The absolute value of\", my_negative, \"is\", abs(my_negative)) print(my_integer, \"squared",
"can do with a list comprehension # male_names = [person.name for person in",
"enumerate using a list 'method'...\") for item in my_list: index = my_list.index(item) print(\"item\",",
"my_float, \"?\", does_half_equal_point_five) for left_num in range(10): for right_num in range(10): product =",
"= [x for x in range(10)] print(\"Numbers 0 to 9\", numbers_0_to_9) squares =",
"os # STRINGS print(\"STRINGS\") my_string_1 = \"hello\" my_string_2 = 'world' my_multiline_string = \"\"\"",
"in range(10): product = left_num * right_num print(left_num, \"x\", right_num, \"=\", product) print",
"print(\"Teen names:\", teen_names) # random import random as rd a = rd.random() b",
"type in other languages. from collections import namedtuple Person = namedtuple(\"Person\", [\"name\", \"age\",",
"a struct data type in other languages. from collections import namedtuple Person =",
"my_string_2) print(my_multiline_string) print(newline_character) print(\"-----------\") print(newline_character) # NUMBERS AND BOOLEANS print(\"NUMBERS\") my_float = 0.5",
"World, Hello. I am a multiline python string. I'm enclosed in triple quotes.",
"print(\"Squares \", squares) odds = [x for x in range(10) if x %",
"# @Date : 2017-10-14 19:45:05 # @Author : jingray (<EMAIL>) # @Link :",
"as rd a = rd.random() b = rd.random() c = rd.random() print(\"a is\",",
"# @Version : $Id$ import os # STRINGS print(\"STRINGS\") my_string_1 = \"hello\" my_string_2",
"print (\"\\n\") #List my_list = [1, 2, 3, \"a\", \"b\", \"c\"] print(\"my_list is:\",",
"data type called a namedtuple which is similar to a struct data type",
"# male_names = [person.name for person in people if person.gender==\"m\"] print(\"Male names:\", male_names)",
"print(\"Numbers 0 to 9\", numbers_0_to_9) squares = [x * x for x in",
"quotes. I'd write them here, but that would end the string! I know!",
"in people if 13 <= p.age <= 18 ] print(\"Teen names:\", teen_names) #",
"left_num in range(10): for right_num in range(10): product = left_num * right_num print(left_num,",
"namedtuple Person = namedtuple(\"Person\", [\"name\", \"age\", \"gender\"]) people = [ Person(\"Andy\", 30, \"m\"),",
"python string. I'm enclosed in triple quotes. I'd write them here, but that",
"does_half_equal_point_five = (my_fraction == my_float) print(\"The absolute value of\", my_negative, \"is\", abs(my_negative)) print(my_integer,",
"if person.gender==\"m\"] print(\"Male names:\", male_names) teen_names = [p.name for p in people if",
"\"b\", \"c\"] print(\"my_list is:\", my_list) print(\"Enumerating a list...\") for i, item in enumerate(my_list):",
"\"x\", right_num, \"=\", product) print (\"\\n\") #List my_list = [1, 2, 3, \"a\",",
"enumerate(my_list): print(\"item number\", i, \"is\", item) print(\"Another way to enumerate using a list",
"== my_float) print(\"The absolute value of\", my_negative, \"is\", abs(my_negative)) print(my_integer, \"squared is equal",
"@Author : jingray (<EMAIL>) # @Link : http://www.jianshu.com/u/01fb0364467d # @Version : $Id$ import",
"18 ] print(\"Teen names:\", teen_names) # random import random as rd a =",
"# @Link : http://www.jianshu.com/u/01fb0364467d # @Version : $Id$ import os # STRINGS print(\"STRINGS\")",
"14, \"f\"), Person(\"Adah\", 13, \"f\"), Person(\"Sebastian\", 42, \"m\"), Person(\"Carol\" , 68, \"f\"), ]",
"for i, item in enumerate(my_list): print(\"item number\", i, \"is\", item) print(\"Another way to",
"rd.random() c = rd.random() print(\"a is\", a) print(\"b is\", b) print(\"c is\", c)",
"'world' my_multiline_string = \"\"\" Dear World, Hello. I am a multiline python string.",
"my_negative = -3.5 my_fraction = 1/2 # what do you think THIS line",
"] print(\"Teen names:\", teen_names) # random import random as rd a = rd.random()",
"a list 'method'...\") for item in my_list: index = my_list.index(item) print(\"item\", item, \"has",
"I'm enclosed in triple quotes. I'd write them here, but that would end",
"numbers_0_to_9 = [x for x in range(10)] print(\"Numbers 0 to 9\", numbers_0_to_9) squares",
"\"m\"), Person(\"Tina\", 32, \"f\"), Person(\"Abby\", 14, \"f\"), Person(\"Adah\", 13, \"f\"), Person(\"Sebastian\", 42, \"m\"),",
"am a multiline python string. I'm enclosed in triple quotes. I'd write them",
"# now let's show what we can do with a list comprehension #",
"range(10) if x % 2 == 1] print(\"Odds \", odds) # This example",
"utf-8 -*- # @Date : 2017-10-14 19:45:05 # @Author : jingray (<EMAIL>) #",
"[ Person(\"Andy\", 30, \"m\"), Person(\"Ping\", 1, \"m\"), Person(\"Tina\", 32, \"f\"), Person(\"Abby\", 14, \"f\"),",
"is similar to a struct data type in other languages. from collections import",
"= rd.random() c = rd.random() print(\"a is\", a) print(\"b is\", b) print(\"c is\",",
"names:\", teen_names) # random import random as rd a = rd.random() b =",
"\"a\", \"b\", \"c\"] print(\"my_list is:\", my_list) print(\"Enumerating a list...\") for i, item in",
"item in my_list: index = my_list.index(item) print(\"item\", item, \"has index\", index) #List Comprehensions",
"item, \"has index\", index) #List Comprehensions numbers_0_to_9 = [x for x in range(10)]",
"end the string! I know! I'll use a slash as an escape character.",
"index\", index) #List Comprehensions numbers_0_to_9 = [x for x in range(10)] print(\"Numbers 0",
"range(10): for right_num in range(10): product = left_num * right_num print(left_num, \"x\", right_num,",
"print(\"gender:\", andy.gender) # now let's show what we can do with a list",
": http://www.jianshu.com/u/01fb0364467d # @Version : $Id$ import os # STRINGS print(\"STRINGS\") my_string_1 =",
"the variable # does_half_equal_point_five? does_half_equal_point_five = (my_fraction == my_float) print(\"The absolute value of\",",
"print(\"NUMBERS\") my_float = 0.5 my_integer = 7 my_negative = -3.5 my_fraction = 1/2",
"my_string_2 = 'world' my_multiline_string = \"\"\" Dear World, Hello. I am a multiline",
"x in range(10) if x % 2 == 1] print(\"Odds \", odds) #",
"what do you think THIS line of code will assign to the variable",
"we can do with a list comprehension # male_names = [person.name for person",
"for p in people if 13 <= p.age <= 18 ] print(\"Teen names:\",",
"Dear World, Hello. I am a multiline python string. I'm enclosed in triple",
"# This example uses a data type called a namedtuple which is similar",
"list 'method'...\") for item in my_list: index = my_list.index(item) print(\"item\", item, \"has index\",",
"-3.5 my_fraction = 1/2 # what do you think THIS line of code",
"print(my_string_1, my_string_2) print(my_multiline_string) print(newline_character) print(\"-----------\") print(newline_character) # NUMBERS AND BOOLEANS print(\"NUMBERS\") my_float =",
"\"f\"), Person(\"Abby\", 14, \"f\"), Person(\"Adah\", 13, \"f\"), Person(\"Sebastian\", 42, \"m\"), Person(\"Carol\" , 68,",
"= people[0] print(\"name: \", andy.name) print(\"age: \", andy.age) print(\"gender:\", andy.gender) # now let's",
"= \"\\n\" print(my_string_1, my_string_2) print(my_multiline_string) print(newline_character) print(\"-----------\") print(newline_character) # NUMBERS AND BOOLEANS print(\"NUMBERS\")",
"print(left_num, \"x\", right_num, \"=\", product) print (\"\\n\") #List my_list = [1, 2, 3,",
"range(10)] print(\"Numbers 0 to 9\", numbers_0_to_9) squares = [x * x for x",
"in range(10): for right_num in range(10): product = left_num * right_num print(left_num, \"x\",",
"range(10)] print(\"Squares \", squares) odds = [x for x in range(10) if x",
"for left_num in range(10): for right_num in range(10): product = left_num * right_num",
"line of code will assign to the variable # does_half_equal_point_five? does_half_equal_point_five = (my_fraction",
"print(\"age: \", andy.age) print(\"gender:\", andy.gender) # now let's show what we can do",
"print(\"The absolute value of\", my_negative, \"is\", abs(my_negative)) print(my_integer, \"squared is equal to\", my_integer",
"= 7 my_negative = -3.5 my_fraction = 1/2 # what do you think",
"Person = namedtuple(\"Person\", [\"name\", \"age\", \"gender\"]) people = [ Person(\"Andy\", 30, \"m\"), Person(\"Ping\",",
"write them here, but that would end the string! I know! I'll use",
"my_float) print(\"The absolute value of\", my_negative, \"is\", abs(my_negative)) print(my_integer, \"squared is equal to\",",
"uses a data type called a namedtuple which is similar to a struct",
"<= p.age <= 18 ] print(\"Teen names:\", teen_names) # random import random as",
"\"is\", abs(my_negative)) print(my_integer, \"squared is equal to\", my_integer ** 2) print(\"Does\", my_fraction, \"equal\",",
"-*- coding: utf-8 -*- # @Date : 2017-10-14 19:45:05 # @Author : jingray",
"coding: utf-8 -*- # @Date : 2017-10-14 19:45:05 # @Author : jingray (<EMAIL>)",
"I'd write them here, but that would end the string! I know! I'll",
"male_names) teen_names = [p.name for p in people if 13 <= p.age <=",
"would end the string! I know! I'll use a slash as an escape",
"= my_list.index(item) print(\"item\", item, \"has index\", index) #List Comprehensions numbers_0_to_9 = [x for",
"my_fraction, \"equal\", my_float, \"?\", does_half_equal_point_five) for left_num in range(10): for right_num in range(10):",
"$Id$ import os # STRINGS print(\"STRINGS\") my_string_1 = \"hello\" my_string_2 = 'world' my_multiline_string",
"the string! I know! I'll use a slash as an escape character. Triple",
"= rd.random() b = rd.random() c = rd.random() print(\"a is\", a) print(\"b is\",",
"Python \"\"\" newline_character = \"\\n\" print(my_string_1, my_string_2) print(my_multiline_string) print(newline_character) print(\"-----------\") print(newline_character) # NUMBERS",
", 68, \"f\"), ] # first, let's show how this namedtuple works. andy",
"escape character. Triple quotes look like this: \\\"\\\"\\\" Sincerely, Python \"\"\" newline_character =",
"works. andy = people[0] print(\"name: \", andy.name) print(\"age: \", andy.age) print(\"gender:\", andy.gender) #",
"that would end the string! I know! I'll use a slash as an",
"# does_half_equal_point_five? does_half_equal_point_five = (my_fraction == my_float) print(\"The absolute value of\", my_negative, \"is\",",
"\"age\", \"gender\"]) people = [ Person(\"Andy\", 30, \"m\"), Person(\"Ping\", 1, \"m\"), Person(\"Tina\", 32,",
"code will assign to the variable # does_half_equal_point_five? does_half_equal_point_five = (my_fraction == my_float)",
"\"\"\" newline_character = \"\\n\" print(my_string_1, my_string_2) print(my_multiline_string) print(newline_character) print(\"-----------\") print(newline_character) # NUMBERS AND",
"Person(\"Andy\", 30, \"m\"), Person(\"Ping\", 1, \"m\"), Person(\"Tina\", 32, \"f\"), Person(\"Abby\", 14, \"f\"), Person(\"Adah\",",
"value of\", my_negative, \"is\", abs(my_negative)) print(my_integer, \"squared is equal to\", my_integer ** 2)",
"rd.random() b = rd.random() c = rd.random() print(\"a is\", a) print(\"b is\", b)",
"x % 2 == 1] print(\"Odds \", odds) # This example uses a",
"Sincerely, Python \"\"\" newline_character = \"\\n\" print(my_string_1, my_string_2) print(my_multiline_string) print(newline_character) print(\"-----------\") print(newline_character) #",
"people[0] print(\"name: \", andy.name) print(\"age: \", andy.age) print(\"gender:\", andy.gender) # now let's show",
"\"f\"), Person(\"Adah\", 13, \"f\"), Person(\"Sebastian\", 42, \"m\"), Person(\"Carol\" , 68, \"f\"), ] #",
"show how this namedtuple works. andy = people[0] print(\"name: \", andy.name) print(\"age: \",",
"this: \\\"\\\"\\\" Sincerely, Python \"\"\" newline_character = \"\\n\" print(my_string_1, my_string_2) print(my_multiline_string) print(newline_character) print(\"-----------\")",
"print(\"item\", item, \"has index\", index) #List Comprehensions numbers_0_to_9 = [x for x in",
"p in people if 13 <= p.age <= 18 ] print(\"Teen names:\", teen_names)",
"\"?\", does_half_equal_point_five) for left_num in range(10): for right_num in range(10): product = left_num",
"[person.name for person in people if person.gender==\"m\"] print(\"Male names:\", male_names) teen_names = [p.name",
"p.age <= 18 ] print(\"Teen names:\", teen_names) # random import random as rd",
"= [1, 2, 3, \"a\", \"b\", \"c\"] print(\"my_list is:\", my_list) print(\"Enumerating a list...\")",
"this namedtuple works. andy = people[0] print(\"name: \", andy.name) print(\"age: \", andy.age) print(\"gender:\",",
"example uses a data type called a namedtuple which is similar to a",
"13, \"f\"), Person(\"Sebastian\", 42, \"m\"), Person(\"Carol\" , 68, \"f\"), ] # first, let's",
"index) #List Comprehensions numbers_0_to_9 = [x for x in range(10)] print(\"Numbers 0 to",
"#!/usr/bin/env python # -*- coding: utf-8 -*- # @Date : 2017-10-14 19:45:05 #",
"# STRINGS print(\"STRINGS\") my_string_1 = \"hello\" my_string_2 = 'world' my_multiline_string = \"\"\" Dear",
"\"is\", item) print(\"Another way to enumerate using a list 'method'...\") for item in",
"1] print(\"Odds \", odds) # This example uses a data type called a",
"= 0.5 my_integer = 7 my_negative = -3.5 my_fraction = 1/2 # what",
"male_names = [person.name for person in people if person.gender==\"m\"] print(\"Male names:\", male_names) teen_names",
"range(10): product = left_num * right_num print(left_num, \"x\", right_num, \"=\", product) print (\"\\n\")",
"\"m\"), Person(\"Carol\" , 68, \"f\"), ] # first, let's show how this namedtuple",
"13 <= p.age <= 18 ] print(\"Teen names:\", teen_names) # random import random",
"a list comprehension # male_names = [person.name for person in people if person.gender==\"m\"]",
"\"f\"), Person(\"Sebastian\", 42, \"m\"), Person(\"Carol\" , 68, \"f\"), ] # first, let's show",
"multiline python string. I'm enclosed in triple quotes. I'd write them here, but",
"you think THIS line of code will assign to the variable # does_half_equal_point_five?",
"string. I'm enclosed in triple quotes. I'd write them here, but that would",
"@Link : http://www.jianshu.com/u/01fb0364467d # @Version : $Id$ import os # STRINGS print(\"STRINGS\") my_string_1",
"print(\"Enumerating a list...\") for i, item in enumerate(my_list): print(\"item number\", i, \"is\", item)",
"andy.age) print(\"gender:\", andy.gender) # now let's show what we can do with a",
"show what we can do with a list comprehension # male_names = [person.name",
"32, \"f\"), Person(\"Abby\", 14, \"f\"), Person(\"Adah\", 13, \"f\"), Person(\"Sebastian\", 42, \"m\"), Person(\"Carol\" ,",
"but that would end the string! I know! I'll use a slash as",
"\"m\"), Person(\"Ping\", 1, \"m\"), Person(\"Tina\", 32, \"f\"), Person(\"Abby\", 14, \"f\"), Person(\"Adah\", 13, \"f\"),",
"my_string_1 = \"hello\" my_string_2 = 'world' my_multiline_string = \"\"\" Dear World, Hello. I",
"in range(10) if x % 2 == 1] print(\"Odds \", odds) # This",
"2) print(\"Does\", my_fraction, \"equal\", my_float, \"?\", does_half_equal_point_five) for left_num in range(10): for right_num",
"look like this: \\\"\\\"\\\" Sincerely, Python \"\"\" newline_character = \"\\n\" print(my_string_1, my_string_2) print(my_multiline_string)",
"* right_num print(left_num, \"x\", right_num, \"=\", product) print (\"\\n\") #List my_list = [1,",
"Hello. I am a multiline python string. I'm enclosed in triple quotes. I'd",
"abs(my_negative)) print(my_integer, \"squared is equal to\", my_integer ** 2) print(\"Does\", my_fraction, \"equal\", my_float,",
"STRINGS print(\"STRINGS\") my_string_1 = \"hello\" my_string_2 = 'world' my_multiline_string = \"\"\" Dear World,",
"namedtuple which is similar to a struct data type in other languages. from",
": 2017-10-14 19:45:05 # @Author : jingray (<EMAIL>) # @Link : http://www.jianshu.com/u/01fb0364467d #",
"(\"\\n\") #List my_list = [1, 2, 3, \"a\", \"b\", \"c\"] print(\"my_list is:\", my_list)",
"i, \"is\", item) print(\"Another way to enumerate using a list 'method'...\") for item",
"Comprehensions numbers_0_to_9 = [x for x in range(10)] print(\"Numbers 0 to 9\", numbers_0_to_9)",
"3, \"a\", \"b\", \"c\"] print(\"my_list is:\", my_list) print(\"Enumerating a list...\") for i, item",
"data type in other languages. from collections import namedtuple Person = namedtuple(\"Person\", [\"name\",",
"print(\"my_list is:\", my_list) print(\"Enumerating a list...\") for i, item in enumerate(my_list): print(\"item number\",",
"to enumerate using a list 'method'...\") for item in my_list: index = my_list.index(item)",
"[p.name for p in people if 13 <= p.age <= 18 ] print(\"Teen",
"'method'...\") for item in my_list: index = my_list.index(item) print(\"item\", item, \"has index\", index)",
"I'll use a slash as an escape character. Triple quotes look like this:",
"for item in my_list: index = my_list.index(item) print(\"item\", item, \"has index\", index) #List",
"will assign to the variable # does_half_equal_point_five? does_half_equal_point_five = (my_fraction == my_float) print(\"The",
"= [ Person(\"Andy\", 30, \"m\"), Person(\"Ping\", 1, \"m\"), Person(\"Tina\", 32, \"f\"), Person(\"Abby\", 14,",
"print(\"Odds \", odds) # This example uses a data type called a namedtuple",
"in range(10)] print(\"Numbers 0 to 9\", numbers_0_to_9) squares = [x * x for",
"like this: \\\"\\\"\\\" Sincerely, Python \"\"\" newline_character = \"\\n\" print(my_string_1, my_string_2) print(my_multiline_string) print(newline_character)",
"-*- # @Date : 2017-10-14 19:45:05 # @Author : jingray (<EMAIL>) # @Link",
"my_negative, \"is\", abs(my_negative)) print(my_integer, \"squared is equal to\", my_integer ** 2) print(\"Does\", my_fraction,",
"my_integer ** 2) print(\"Does\", my_fraction, \"equal\", my_float, \"?\", does_half_equal_point_five) for left_num in range(10):",
"with a list comprehension # male_names = [person.name for person in people if",
"product = left_num * right_num print(left_num, \"x\", right_num, \"=\", product) print (\"\\n\") #List",
"a = rd.random() b = rd.random() c = rd.random() print(\"a is\", a) print(\"b",
"print(\"Male names:\", male_names) teen_names = [p.name for p in people if 13 <=",
"know! I'll use a slash as an escape character. Triple quotes look like",
"variable # does_half_equal_point_five? does_half_equal_point_five = (my_fraction == my_float) print(\"The absolute value of\", my_negative,",
"\"f\"), ] # first, let's show how this namedtuple works. andy = people[0]",
"\", odds) # This example uses a data type called a namedtuple which",
"2017-10-14 19:45:05 # @Author : jingray (<EMAIL>) # @Link : http://www.jianshu.com/u/01fb0364467d # @Version",
"person in people if person.gender==\"m\"] print(\"Male names:\", male_names) teen_names = [p.name for p",
"people if 13 <= p.age <= 18 ] print(\"Teen names:\", teen_names) # random",
"a multiline python string. I'm enclosed in triple quotes. I'd write them here,",
"my_integer = 7 my_negative = -3.5 my_fraction = 1/2 # what do you",
"for x in range(10) if x % 2 == 1] print(\"Odds \", odds)",
"print(\"item number\", i, \"is\", item) print(\"Another way to enumerate using a list 'method'...\")",
"using a list 'method'...\") for item in my_list: index = my_list.index(item) print(\"item\", item,",
"# @Author : jingray (<EMAIL>) # @Link : http://www.jianshu.com/u/01fb0364467d # @Version : $Id$",
"= 1/2 # what do you think THIS line of code will assign",
"what we can do with a list comprehension # male_names = [person.name for",
"which is similar to a struct data type in other languages. from collections",
"Person(\"Carol\" , 68, \"f\"), ] # first, let's show how this namedtuple works.",
"I am a multiline python string. I'm enclosed in triple quotes. I'd write",
"\"equal\", my_float, \"?\", does_half_equal_point_five) for left_num in range(10): for right_num in range(10): product",
"= \"\"\" Dear World, Hello. I am a multiline python string. I'm enclosed",
"\"squared is equal to\", my_integer ** 2) print(\"Does\", my_fraction, \"equal\", my_float, \"?\", does_half_equal_point_five)",
"* x for x in range(10)] print(\"Squares \", squares) odds = [x for",
"\"\"\" Dear World, Hello. I am a multiline python string. I'm enclosed in",
"in people if person.gender==\"m\"] print(\"Male names:\", male_names) teen_names = [p.name for p in",
"in other languages. from collections import namedtuple Person = namedtuple(\"Person\", [\"name\", \"age\", \"gender\"])",
"\"hello\" my_string_2 = 'world' my_multiline_string = \"\"\" Dear World, Hello. I am a",
"I know! I'll use a slash as an escape character. Triple quotes look",
"print(\"Does\", my_fraction, \"equal\", my_float, \"?\", does_half_equal_point_five) for left_num in range(10): for right_num in",
"Person(\"Sebastian\", 42, \"m\"), Person(\"Carol\" , 68, \"f\"), ] # first, let's show how",
"i, item in enumerate(my_list): print(\"item number\", i, \"is\", item) print(\"Another way to enumerate",
"THIS line of code will assign to the variable # does_half_equal_point_five? does_half_equal_point_five =",
"# what do you think THIS line of code will assign to the",
"python # -*- coding: utf-8 -*- # @Date : 2017-10-14 19:45:05 # @Author",
"\", andy.name) print(\"age: \", andy.age) print(\"gender:\", andy.gender) # now let's show what we",
"9\", numbers_0_to_9) squares = [x * x for x in range(10)] print(\"Squares \",",
"\"has index\", index) #List Comprehensions numbers_0_to_9 = [x for x in range(10)] print(\"Numbers",
"squares) odds = [x for x in range(10) if x % 2 ==",
"# -*- coding: utf-8 -*- # @Date : 2017-10-14 19:45:05 # @Author :",
"let's show what we can do with a list comprehension # male_names =",
"odds) # This example uses a data type called a namedtuple which is",
"\"=\", product) print (\"\\n\") #List my_list = [1, 2, 3, \"a\", \"b\", \"c\"]",
"how this namedtuple works. andy = people[0] print(\"name: \", andy.name) print(\"age: \", andy.age)",
"teen_names) # random import random as rd a = rd.random() b = rd.random()",
"way to enumerate using a list 'method'...\") for item in my_list: index =",
"= [p.name for p in people if 13 <= p.age <= 18 ]",
"similar to a struct data type in other languages. from collections import namedtuple",
"Person(\"Tina\", 32, \"f\"), Person(\"Abby\", 14, \"f\"), Person(\"Adah\", 13, \"f\"), Person(\"Sebastian\", 42, \"m\"), Person(\"Carol\"",
"0.5 my_integer = 7 my_negative = -3.5 my_fraction = 1/2 # what do",
"called a namedtuple which is similar to a struct data type in other",
"= [person.name for person in people if person.gender==\"m\"] print(\"Male names:\", male_names) teen_names =",
"of\", my_negative, \"is\", abs(my_negative)) print(my_integer, \"squared is equal to\", my_integer ** 2) print(\"Does\",",
"[x * x for x in range(10)] print(\"Squares \", squares) odds = [x",
"\"c\"] print(\"my_list is:\", my_list) print(\"Enumerating a list...\") for i, item in enumerate(my_list): print(\"item",
"for person in people if person.gender==\"m\"] print(\"Male names:\", male_names) teen_names = [p.name for",
"<= 18 ] print(\"Teen names:\", teen_names) # random import random as rd a",
"Person(\"Adah\", 13, \"f\"), Person(\"Sebastian\", 42, \"m\"), Person(\"Carol\" , 68, \"f\"), ] # first,",
"Person(\"Abby\", 14, \"f\"), Person(\"Adah\", 13, \"f\"), Person(\"Sebastian\", 42, \"m\"), Person(\"Carol\" , 68, \"f\"),",
"andy.name) print(\"age: \", andy.age) print(\"gender:\", andy.gender) # now let's show what we can",
"character. Triple quotes look like this: \\\"\\\"\\\" Sincerely, Python \"\"\" newline_character = \"\\n\"",
"of code will assign to the variable # does_half_equal_point_five? does_half_equal_point_five = (my_fraction ==",
"item in enumerate(my_list): print(\"item number\", i, \"is\", item) print(\"Another way to enumerate using",
"in enumerate(my_list): print(\"item number\", i, \"is\", item) print(\"Another way to enumerate using a",
"comprehension # male_names = [person.name for person in people if person.gender==\"m\"] print(\"Male names:\",",
"right_num print(left_num, \"x\", right_num, \"=\", product) print (\"\\n\") #List my_list = [1, 2,",
"as an escape character. Triple quotes look like this: \\\"\\\"\\\" Sincerely, Python \"\"\"",
"now let's show what we can do with a list comprehension # male_names",
"right_num in range(10): product = left_num * right_num print(left_num, \"x\", right_num, \"=\", product)",
"here, but that would end the string! I know! I'll use a slash",
"= [x * x for x in range(10)] print(\"Squares \", squares) odds =",
"68, \"f\"), ] # first, let's show how this namedtuple works. andy =",
"@Version : $Id$ import os # STRINGS print(\"STRINGS\") my_string_1 = \"hello\" my_string_2 =",
"7 my_negative = -3.5 my_fraction = 1/2 # what do you think THIS",
"use a slash as an escape character. Triple quotes look like this: \\\"\\\"\\\"",
"random import random as rd a = rd.random() b = rd.random() c =",
"http://www.jianshu.com/u/01fb0364467d # @Version : $Id$ import os # STRINGS print(\"STRINGS\") my_string_1 = \"hello\"",
"in range(10)] print(\"Squares \", squares) odds = [x for x in range(10) if",
"think THIS line of code will assign to the variable # does_half_equal_point_five? does_half_equal_point_five",
"list...\") for i, item in enumerate(my_list): print(\"item number\", i, \"is\", item) print(\"Another way",
"# first, let's show how this namedtuple works. andy = people[0] print(\"name: \",",
"first, let's show how this namedtuple works. andy = people[0] print(\"name: \", andy.name)",
"jingray (<EMAIL>) # @Link : http://www.jianshu.com/u/01fb0364467d # @Version : $Id$ import os #",
"andy = people[0] print(\"name: \", andy.name) print(\"age: \", andy.age) print(\"gender:\", andy.gender) # now",
"left_num * right_num print(left_num, \"x\", right_num, \"=\", product) print (\"\\n\") #List my_list =",
"AND BOOLEANS print(\"NUMBERS\") my_float = 0.5 my_integer = 7 my_negative = -3.5 my_fraction"
] |
[
": 7040.000, \"A#8\" : 7458.620, \"B8\" : 7902.133, \"C9\" : 8372.018, \"C#9\" :",
"58.270, \"B1\" : 61.735, \"C2\" : 65.406, \"C#2\" : 69.296, \"D2\" : 73.416,",
"174.614, \"F#3\" : 184.997, \"G3\" : 195.998, \"G#3\" : 207.652, \"A3\" : 220.000,",
"18.354, \"D#0\" : 19.445, \"E0\" : 20.602, \"F0\" : 21.827, \"F#0\" : 23.125,",
"466.164, \"B4\" : 493.883, \"C5\" : 523.251, \"C#5\" : 554.365, \"D5\" : 587.330,",
"3951.066, \"C8\" : 4186.009, \"C#8\" : 4434.922, \"D8\" : 4698.636, \"D#8\" : 4978.032,",
"\"F8\" : 5587.652, \"F#8\" : 5919.911, \"G8\" : 6271.927, \"G#8\" : 6644.875, \"A8\"",
": 13.750, \"A#-1\": 14.568, \"B-1\" : 15.434, \"C0\" : 16.352, \"C#0\" : 17.324,",
"\"D#4\" : 311.127, \"E4\" : 329.628, \"F4\" : 349.228, \"F#4\" : 369.994, \"G4\"",
": 10.301, \"F-1\" : 10.913, \"F#-1\": 11.562, \"G-1\" : 12.250, \"G#-1\": 12.978, \"A-1\"",
"\"G#5\" : 830.609, \"A5\" : 880.000, \"A#5\" : 932.328, \"B5\" : 987.767, \"C6\"",
"\"A#8\" : 7458.620, \"B8\" : 7902.133, \"C9\" : 8372.018, \"C#9\" : 8869.844, \"D9\"",
": 6271.927, \"G#8\" : 6644.875, \"A8\" : 7040.000, \"A#8\" : 7458.620, \"B8\" :",
"{ \"C-1\" : 8.176, \"C#-1\": 8.662, \"D-1\" : 9.177, \"D#-1\": 9.723, \"E-1\" :",
": 61.735, \"C2\" : 65.406, \"C#2\" : 69.296, \"D2\" : 73.416, \"D#2\" :",
": 65.406, \"C#2\" : 69.296, \"D2\" : 73.416, \"D#2\" : 77.782, \"E2\" :",
"\"D#1\" : 38.891, \"E1\" : 41.203, \"F1\" : 43.654, \"F#1\" : 46.249, \"G1\"",
"103.826, \"A2\" : 110.000, \"A#2\" : 116.541, \"B2\" : 123.471, \"C3\" : 130.813,",
"\"E3\" : 164.814, \"F3\" : 174.614, \"F#3\" : 184.997, \"G3\" : 195.998, \"G#3\"",
"138.591, \"D3\" : 146.832, \"D#3\" : 155.563, \"E3\" : 164.814, \"F3\" : 174.614,",
"16.352, \"C#0\" : 17.324, \"D0\" : 18.354, \"D#0\" : 19.445, \"E0\" : 20.602,",
"5919.911, \"G8\" : 6271.927, \"G#8\" : 6644.875, \"A8\" : 7040.000, \"A#8\" : 7458.620,",
": 293.665, \"D#4\" : 311.127, \"E4\" : 329.628, \"F4\" : 349.228, \"F#4\" :",
"\"C#1\" : 34.648, \"D1\" : 36.708, \"D#1\" : 38.891, \"E1\" : 41.203, \"F1\"",
"\"A#4\" : 466.164, \"B4\" : 493.883, \"C5\" : 523.251, \"C#5\" : 554.365, \"D5\"",
"32.703, \"C#1\" : 34.648, \"D1\" : 36.708, \"D#1\" : 38.891, \"E1\" : 41.203,",
"\"A3\" : 220.000, \"A#3\" : 233.082, \"B3\" : 246.942, \"C4\" : 261.626, \"C#4\"",
"\"C#0\" : 17.324, \"D0\" : 18.354, \"D#0\" : 19.445, \"E0\" : 20.602, \"F0\"",
"\"E1\" : 41.203, \"F1\" : 43.654, \"F#1\" : 46.249, \"G1\" : 48.999, \"G#1\"",
": 2349.318, \"D#7\" : 2489.016, \"E7\" : 2637.020, \"F7\" : 2793.826, \"F#7\" :",
"4698.636, \"D#8\" : 4978.032, \"E8\" : 5274.041, \"F8\" : 5587.652, \"F#8\" : 5919.911,",
"36.708, \"D#1\" : 38.891, \"E1\" : 41.203, \"F1\" : 43.654, \"F#1\" : 46.249,",
"7458.620, \"B8\" : 7902.133, \"C9\" : 8372.018, \"C#9\" : 8869.844, \"D9\" : 9397.273,",
"12.978, \"A-1\" : 13.750, \"A#-1\": 14.568, \"B-1\" : 15.434, \"C0\" : 16.352, \"C#0\"",
"155.563, \"E3\" : 164.814, \"F3\" : 174.614, \"F#3\" : 184.997, \"G3\" : 195.998,",
"\"D6\" : 1174.659, \"D#6\" : 1244.508, \"E6\" : 1318.510, \"F6\" : 1396.913, \"F#6\"",
"21.827, \"F#0\" : 23.125, \"G0\" : 24.500, \"G#0\" : 25.957, \"A0\" : 27.500,",
"130.813, \"C#3\" : 138.591, \"D3\" : 146.832, \"D#3\" : 155.563, \"E3\" : 164.814,",
"415.305, \"A4\" : 440.000, \"A#4\" : 466.164, \"B4\" : 493.883, \"C5\" : 523.251,",
"\"F0\" : 21.827, \"F#0\" : 23.125, \"G0\" : 24.500, \"G#0\" : 25.957, \"A0\"",
": 1318.510, \"F6\" : 1396.913, \"F#6\" : 1479.978, \"G6\" : 1567.982, \"G#6\" :",
"82.407, \"F2\" : 87.307, \"F#2\" : 92.499, \"G2\" : 97.999, \"G#2\" : 103.826,",
": 130.813, \"C#3\" : 138.591, \"D3\" : 146.832, \"D#3\" : 155.563, \"E3\" :",
"30.868, \"C1\" : 32.703, \"C#1\" : 34.648, \"D1\" : 36.708, \"D#1\" : 38.891,",
"2349.318, \"D#7\" : 2489.016, \"E7\" : 2637.020, \"F7\" : 2793.826, \"F#7\" : 2959.955,",
"1567.982, \"G#6\" : 1661.219, \"A6\" : 1760.000, \"A#6\" : 1864.655, \"B6\" : 1975.533,",
"\"F-1\" : 10.913, \"F#-1\": 11.562, \"G-1\" : 12.250, \"G#-1\": 12.978, \"A-1\" : 13.750,",
": 19.445, \"E0\" : 20.602, \"F0\" : 21.827, \"F#0\" : 23.125, \"G0\" :",
"\"C#5\" : 554.365, \"D5\" : 587.330, \"D#5\" : 622.254, \"E5\" : 659.255, \"F5\"",
"29.135, \"B0\" : 30.868, \"C1\" : 32.703, \"C#1\" : 34.648, \"D1\" : 36.708,",
"\"B4\" : 493.883, \"C5\" : 523.251, \"C#5\" : 554.365, \"D5\" : 587.330, \"D#5\"",
"\"A7\" : 3520.000, \"A#7\" : 3729.310, \"B7\" : 3951.066, \"C8\" : 4186.009, \"C#8\"",
"\"F#0\" : 23.125, \"G0\" : 24.500, \"G#0\" : 25.957, \"A0\" : 27.500, \"A#0\"",
"\"B6\" : 1975.533, \"C7\" : 2093.005, \"C#7\" : 2217.461, \"D7\" : 2349.318, \"D#7\"",
"6271.927, \"G#8\" : 6644.875, \"A8\" : 7040.000, \"A#8\" : 7458.620, \"B8\" : 7902.133,",
"1479.978, \"G6\" : 1567.982, \"G#6\" : 1661.219, \"A6\" : 1760.000, \"A#6\" : 1864.655,",
"19.445, \"E0\" : 20.602, \"F0\" : 21.827, \"F#0\" : 23.125, \"G0\" : 24.500,",
"116.541, \"B2\" : 123.471, \"C3\" : 130.813, \"C#3\" : 138.591, \"D3\" : 146.832,",
"207.652, \"A3\" : 220.000, \"A#3\" : 233.082, \"B3\" : 246.942, \"C4\" : 261.626,",
": 622.254, \"E5\" : 659.255, \"F5\" : 698.456, \"F#5\" : 739.989, \"G5\" :",
"\"C9\" : 8372.018, \"C#9\" : 8869.844, \"D9\" : 9397.273, \"D#9\" : 9956.063, \"E9\"",
"\"A#6\" : 1864.655, \"B6\" : 1975.533, \"C7\" : 2093.005, \"C#7\" : 2217.461, \"D7\"",
"\"A2\" : 110.000, \"A#2\" : 116.541, \"B2\" : 123.471, \"C3\" : 130.813, \"C#3\"",
"65.406, \"C#2\" : 69.296, \"D2\" : 73.416, \"D#2\" : 77.782, \"E2\" : 82.407,",
": 23.125, \"G0\" : 24.500, \"G#0\" : 25.957, \"A0\" : 27.500, \"A#0\" :",
"2637.020, \"F7\" : 2793.826, \"F#7\" : 2959.955, \"G7\" : 3135.963, \"G#7\" : 3322.438,",
": 9956.063, \"E9\" : 10548.08, \"F9\" : 11175.30, \"F#9\" : 11839.82, \"G9\" :",
"\"G4\" : 391.995, \"G#4\" : 415.305, \"A4\" : 440.000, \"A#4\" : 466.164, \"B4\"",
"\"E7\" : 2637.020, \"F7\" : 2793.826, \"F#7\" : 2959.955, \"G7\" : 3135.963, \"G#7\"",
"\"A0\" : 27.500, \"A#0\" : 29.135, \"B0\" : 30.868, \"C1\" : 32.703, \"C#1\"",
"1396.913, \"F#6\" : 1479.978, \"G6\" : 1567.982, \"G#6\" : 1661.219, \"A6\" : 1760.000,",
"\"A-1\" : 13.750, \"A#-1\": 14.568, \"B-1\" : 15.434, \"C0\" : 16.352, \"C#0\" :",
"23.125, \"G0\" : 24.500, \"G#0\" : 25.957, \"A0\" : 27.500, \"A#0\" : 29.135,",
"2093.005, \"C#7\" : 2217.461, \"D7\" : 2349.318, \"D#7\" : 2489.016, \"E7\" : 2637.020,",
"\"C7\" : 2093.005, \"C#7\" : 2217.461, \"D7\" : 2349.318, \"D#7\" : 2489.016, \"E7\"",
": 466.164, \"B4\" : 493.883, \"C5\" : 523.251, \"C#5\" : 554.365, \"D5\" :",
": 9.177, \"D#-1\": 9.723, \"E-1\" : 10.301, \"F-1\" : 10.913, \"F#-1\": 11.562, \"G-1\"",
"\"G0\" : 24.500, \"G#0\" : 25.957, \"A0\" : 27.500, \"A#0\" : 29.135, \"B0\"",
": 27.500, \"A#0\" : 29.135, \"B0\" : 30.868, \"C1\" : 32.703, \"C#1\" :",
"830.609, \"A5\" : 880.000, \"A#5\" : 932.328, \"B5\" : 987.767, \"C6\" : 1046.502,",
"10.913, \"F#-1\": 11.562, \"G-1\" : 12.250, \"G#-1\": 12.978, \"A-1\" : 13.750, \"A#-1\": 14.568,",
"\"D9\" : 9397.273, \"D#9\" : 9956.063, \"E9\" : 10548.08, \"F9\" : 11175.30, \"F#9\"",
": 77.782, \"E2\" : 82.407, \"F2\" : 87.307, \"F#2\" : 92.499, \"G2\" :",
": 1174.659, \"D#6\" : 1244.508, \"E6\" : 1318.510, \"F6\" : 1396.913, \"F#6\" :",
"9956.063, \"E9\" : 10548.08, \"F9\" : 11175.30, \"F#9\" : 11839.82, \"G9\" : 12543.85,",
"\"D0\" : 18.354, \"D#0\" : 19.445, \"E0\" : 20.602, \"F0\" : 21.827, \"F#0\"",
"6644.875, \"A8\" : 7040.000, \"A#8\" : 7458.620, \"B8\" : 7902.133, \"C9\" : 8372.018,",
": 246.942, \"C4\" : 261.626, \"C#4\" : 277.183, \"D4\" : 293.665, \"D#4\" :",
"\"A1\" : 55.000, \"A#1\" : 58.270, \"B1\" : 61.735, \"C2\" : 65.406, \"C#2\"",
"698.456, \"F#5\" : 739.989, \"G5\" : 783.991, \"G#5\" : 830.609, \"A5\" : 880.000,",
"587.330, \"D#5\" : 622.254, \"E5\" : 659.255, \"F5\" : 698.456, \"F#5\" : 739.989,",
"2793.826, \"F#7\" : 2959.955, \"G7\" : 3135.963, \"G#7\" : 3322.438, \"A7\" : 3520.000,",
"\"D#0\" : 19.445, \"E0\" : 20.602, \"F0\" : 21.827, \"F#0\" : 23.125, \"G0\"",
": 110.000, \"A#2\" : 116.541, \"B2\" : 123.471, \"C3\" : 130.813, \"C#3\" :",
"\"E-1\" : 10.301, \"F-1\" : 10.913, \"F#-1\": 11.562, \"G-1\" : 12.250, \"G#-1\": 12.978,",
": 174.614, \"F#3\" : 184.997, \"G3\" : 195.998, \"G#3\" : 207.652, \"A3\" :",
"\"G7\" : 3135.963, \"G#7\" : 3322.438, \"A7\" : 3520.000, \"A#7\" : 3729.310, \"B7\"",
": 164.814, \"F3\" : 174.614, \"F#3\" : 184.997, \"G3\" : 195.998, \"G#3\" :",
"\"F4\" : 349.228, \"F#4\" : 369.994, \"G4\" : 391.995, \"G#4\" : 415.305, \"A4\"",
"\"D#2\" : 77.782, \"E2\" : 82.407, \"F2\" : 87.307, \"F#2\" : 92.499, \"G2\"",
"\"F5\" : 698.456, \"F#5\" : 739.989, \"G5\" : 783.991, \"G#5\" : 830.609, \"A5\"",
"\"G3\" : 195.998, \"G#3\" : 207.652, \"A3\" : 220.000, \"A#3\" : 233.082, \"B3\"",
": 220.000, \"A#3\" : 233.082, \"B3\" : 246.942, \"C4\" : 261.626, \"C#4\" :",
": 15.434, \"C0\" : 16.352, \"C#0\" : 17.324, \"D0\" : 18.354, \"D#0\" :",
"\"D#-1\": 9.723, \"E-1\" : 10.301, \"F-1\" : 10.913, \"F#-1\": 11.562, \"G-1\" : 12.250,",
"\"B-1\" : 15.434, \"C0\" : 16.352, \"C#0\" : 17.324, \"D0\" : 18.354, \"D#0\"",
"8869.844, \"D9\" : 9397.273, \"D#9\" : 9956.063, \"E9\" : 10548.08, \"F9\" : 11175.30,",
"1864.655, \"B6\" : 1975.533, \"C7\" : 2093.005, \"C#7\" : 2217.461, \"D7\" : 2349.318,",
"73.416, \"D#2\" : 77.782, \"E2\" : 82.407, \"F2\" : 87.307, \"F#2\" : 92.499,",
"11.562, \"G-1\" : 12.250, \"G#-1\": 12.978, \"A-1\" : 13.750, \"A#-1\": 14.568, \"B-1\" :",
"\"G#7\" : 3322.438, \"A7\" : 3520.000, \"A#7\" : 3729.310, \"B7\" : 3951.066, \"C8\"",
"\"D8\" : 4698.636, \"D#8\" : 4978.032, \"E8\" : 5274.041, \"F8\" : 5587.652, \"F#8\"",
"220.000, \"A#3\" : 233.082, \"B3\" : 246.942, \"C4\" : 261.626, \"C#4\" : 277.183,",
"\"F1\" : 43.654, \"F#1\" : 46.249, \"G1\" : 48.999, \"G#1\" : 51.913, \"A1\"",
"783.991, \"G#5\" : 830.609, \"A5\" : 880.000, \"A#5\" : 932.328, \"B5\" : 987.767,",
": 1396.913, \"F#6\" : 1479.978, \"G6\" : 1567.982, \"G#6\" : 1661.219, \"A6\" :",
"123.471, \"C3\" : 130.813, \"C#3\" : 138.591, \"D3\" : 146.832, \"D#3\" : 155.563,",
"10.301, \"F-1\" : 10.913, \"F#-1\": 11.562, \"G-1\" : 12.250, \"G#-1\": 12.978, \"A-1\" :",
"\"E9\" : 10548.08, \"F9\" : 11175.30, \"F#9\" : 11839.82, \"G9\" : 12543.85, }",
": 3322.438, \"A7\" : 3520.000, \"A#7\" : 3729.310, \"B7\" : 3951.066, \"C8\" :",
": 146.832, \"D#3\" : 155.563, \"E3\" : 164.814, \"F3\" : 174.614, \"F#3\" :",
": 25.957, \"A0\" : 27.500, \"A#0\" : 29.135, \"B0\" : 30.868, \"C1\" :",
": 123.471, \"C3\" : 130.813, \"C#3\" : 138.591, \"D3\" : 146.832, \"D#3\" :",
": 3135.963, \"G#7\" : 3322.438, \"A7\" : 3520.000, \"A#7\" : 3729.310, \"B7\" :",
": 329.628, \"F4\" : 349.228, \"F#4\" : 369.994, \"G4\" : 391.995, \"G#4\" :",
"\"C3\" : 130.813, \"C#3\" : 138.591, \"D3\" : 146.832, \"D#3\" : 155.563, \"E3\"",
": 69.296, \"D2\" : 73.416, \"D#2\" : 77.782, \"E2\" : 82.407, \"F2\" :",
"164.814, \"F3\" : 174.614, \"F#3\" : 184.997, \"G3\" : 195.998, \"G#3\" : 207.652,",
": 41.203, \"F1\" : 43.654, \"F#1\" : 46.249, \"G1\" : 48.999, \"G#1\" :",
": 554.365, \"D5\" : 587.330, \"D#5\" : 622.254, \"E5\" : 659.255, \"F5\" :",
"\"F#-1\": 11.562, \"G-1\" : 12.250, \"G#-1\": 12.978, \"A-1\" : 13.750, \"A#-1\": 14.568, \"B-1\"",
"391.995, \"G#4\" : 415.305, \"A4\" : 440.000, \"A#4\" : 466.164, \"B4\" : 493.883,",
": 830.609, \"A5\" : 880.000, \"A#5\" : 932.328, \"B5\" : 987.767, \"C6\" :",
"233.082, \"B3\" : 246.942, \"C4\" : 261.626, \"C#4\" : 277.183, \"D4\" : 293.665,",
"1318.510, \"F6\" : 1396.913, \"F#6\" : 1479.978, \"G6\" : 1567.982, \"G#6\" : 1661.219,",
"\"G1\" : 48.999, \"G#1\" : 51.913, \"A1\" : 55.000, \"A#1\" : 58.270, \"B1\"",
": 1046.502, \"C#6\" : 1108.731, \"D6\" : 1174.659, \"D#6\" : 1244.508, \"E6\" :",
"\"G#6\" : 1661.219, \"A6\" : 1760.000, \"A#6\" : 1864.655, \"B6\" : 1975.533, \"C7\"",
"\"C4\" : 261.626, \"C#4\" : 277.183, \"D4\" : 293.665, \"D#4\" : 311.127, \"E4\"",
"\"G8\" : 6271.927, \"G#8\" : 6644.875, \"A8\" : 7040.000, \"A#8\" : 7458.620, \"B8\"",
"\"C2\" : 65.406, \"C#2\" : 69.296, \"D2\" : 73.416, \"D#2\" : 77.782, \"E2\"",
"25.957, \"A0\" : 27.500, \"A#0\" : 29.135, \"B0\" : 30.868, \"C1\" : 32.703,",
"\"C0\" : 16.352, \"C#0\" : 17.324, \"D0\" : 18.354, \"D#0\" : 19.445, \"E0\"",
"493.883, \"C5\" : 523.251, \"C#5\" : 554.365, \"D5\" : 587.330, \"D#5\" : 622.254,",
": 1244.508, \"E6\" : 1318.510, \"F6\" : 1396.913, \"F#6\" : 1479.978, \"G6\" :",
"293.665, \"D#4\" : 311.127, \"E4\" : 329.628, \"F4\" : 349.228, \"F#4\" : 369.994,",
": 12.250, \"G#-1\": 12.978, \"A-1\" : 13.750, \"A#-1\": 14.568, \"B-1\" : 15.434, \"C0\"",
": 659.255, \"F5\" : 698.456, \"F#5\" : 739.989, \"G5\" : 783.991, \"G#5\" :",
": 1108.731, \"D6\" : 1174.659, \"D#6\" : 1244.508, \"E6\" : 1318.510, \"F6\" :",
": 97.999, \"G#2\" : 103.826, \"A2\" : 110.000, \"A#2\" : 116.541, \"B2\" :",
"\"B0\" : 30.868, \"C1\" : 32.703, \"C#1\" : 34.648, \"D1\" : 36.708, \"D#1\"",
": 184.997, \"G3\" : 195.998, \"G#3\" : 207.652, \"A3\" : 220.000, \"A#3\" :",
"3520.000, \"A#7\" : 3729.310, \"B7\" : 3951.066, \"C8\" : 4186.009, \"C#8\" : 4434.922,",
"\"G5\" : 783.991, \"G#5\" : 830.609, \"A5\" : 880.000, \"A#5\" : 932.328, \"B5\"",
"14.568, \"B-1\" : 15.434, \"C0\" : 16.352, \"C#0\" : 17.324, \"D0\" : 18.354,",
"9397.273, \"D#9\" : 9956.063, \"E9\" : 10548.08, \"F9\" : 11175.30, \"F#9\" : 11839.82,",
": 4978.032, \"E8\" : 5274.041, \"F8\" : 5587.652, \"F#8\" : 5919.911, \"G8\" :",
"\"A#2\" : 116.541, \"B2\" : 123.471, \"C3\" : 130.813, \"C#3\" : 138.591, \"D3\"",
"880.000, \"A#5\" : 932.328, \"B5\" : 987.767, \"C6\" : 1046.502, \"C#6\" : 1108.731,",
": 21.827, \"F#0\" : 23.125, \"G0\" : 24.500, \"G#0\" : 25.957, \"A0\" :",
"48.999, \"G#1\" : 51.913, \"A1\" : 55.000, \"A#1\" : 58.270, \"B1\" : 61.735,",
": 10.913, \"F#-1\": 11.562, \"G-1\" : 12.250, \"G#-1\": 12.978, \"A-1\" : 13.750, \"A#-1\":",
": 55.000, \"A#1\" : 58.270, \"B1\" : 61.735, \"C2\" : 65.406, \"C#2\" :",
"987.767, \"C6\" : 1046.502, \"C#6\" : 1108.731, \"D6\" : 1174.659, \"D#6\" : 1244.508,",
"\"A#1\" : 58.270, \"B1\" : 61.735, \"C2\" : 65.406, \"C#2\" : 69.296, \"D2\"",
"184.997, \"G3\" : 195.998, \"G#3\" : 207.652, \"A3\" : 220.000, \"A#3\" : 233.082,",
": 6644.875, \"A8\" : 7040.000, \"A#8\" : 7458.620, \"B8\" : 7902.133, \"C9\" :",
"1046.502, \"C#6\" : 1108.731, \"D6\" : 1174.659, \"D#6\" : 1244.508, \"E6\" : 1318.510,",
": 3729.310, \"B7\" : 3951.066, \"C8\" : 4186.009, \"C#8\" : 4434.922, \"D8\" :",
"92.499, \"G2\" : 97.999, \"G#2\" : 103.826, \"A2\" : 110.000, \"A#2\" : 116.541,",
": 2637.020, \"F7\" : 2793.826, \"F#7\" : 2959.955, \"G7\" : 3135.963, \"G#7\" :",
"\"F7\" : 2793.826, \"F#7\" : 2959.955, \"G7\" : 3135.963, \"G#7\" : 3322.438, \"A7\"",
"\"G#1\" : 51.913, \"A1\" : 55.000, \"A#1\" : 58.270, \"B1\" : 61.735, \"C2\"",
"15.434, \"C0\" : 16.352, \"C#0\" : 17.324, \"D0\" : 18.354, \"D#0\" : 19.445,",
": 2093.005, \"C#7\" : 2217.461, \"D7\" : 2349.318, \"D#7\" : 2489.016, \"E7\" :",
"\"E8\" : 5274.041, \"F8\" : 5587.652, \"F#8\" : 5919.911, \"G8\" : 6271.927, \"G#8\"",
": 277.183, \"D4\" : 293.665, \"D#4\" : 311.127, \"E4\" : 329.628, \"F4\" :",
"\"G#3\" : 207.652, \"A3\" : 220.000, \"A#3\" : 233.082, \"B3\" : 246.942, \"C4\"",
"5587.652, \"F#8\" : 5919.911, \"G8\" : 6271.927, \"G#8\" : 6644.875, \"A8\" : 7040.000,",
": 1864.655, \"B6\" : 1975.533, \"C7\" : 2093.005, \"C#7\" : 2217.461, \"D7\" :",
"\"C#-1\": 8.662, \"D-1\" : 9.177, \"D#-1\": 9.723, \"E-1\" : 10.301, \"F-1\" : 10.913,",
"69.296, \"D2\" : 73.416, \"D#2\" : 77.782, \"E2\" : 82.407, \"F2\" : 87.307,",
"277.183, \"D4\" : 293.665, \"D#4\" : 311.127, \"E4\" : 329.628, \"F4\" : 349.228,",
": 3951.066, \"C8\" : 4186.009, \"C#8\" : 4434.922, \"D8\" : 4698.636, \"D#8\" :",
"\"G#0\" : 25.957, \"A0\" : 27.500, \"A#0\" : 29.135, \"B0\" : 30.868, \"C1\"",
": 369.994, \"G4\" : 391.995, \"G#4\" : 415.305, \"A4\" : 440.000, \"A#4\" :",
"369.994, \"G4\" : 391.995, \"G#4\" : 415.305, \"A4\" : 440.000, \"A#4\" : 466.164,",
"\"D#8\" : 4978.032, \"E8\" : 5274.041, \"F8\" : 5587.652, \"F#8\" : 5919.911, \"G8\"",
": 73.416, \"D#2\" : 77.782, \"E2\" : 82.407, \"F2\" : 87.307, \"F#2\" :",
": 880.000, \"A#5\" : 932.328, \"B5\" : 987.767, \"C6\" : 1046.502, \"C#6\" :",
"61.735, \"C2\" : 65.406, \"C#2\" : 69.296, \"D2\" : 73.416, \"D#2\" : 77.782,",
": 1760.000, \"A#6\" : 1864.655, \"B6\" : 1975.533, \"C7\" : 2093.005, \"C#7\" :",
": 1975.533, \"C7\" : 2093.005, \"C#7\" : 2217.461, \"D7\" : 2349.318, \"D#7\" :",
"= { \"C-1\" : 8.176, \"C#-1\": 8.662, \"D-1\" : 9.177, \"D#-1\": 9.723, \"E-1\"",
"554.365, \"D5\" : 587.330, \"D#5\" : 622.254, \"E5\" : 659.255, \"F5\" : 698.456,",
"8372.018, \"C#9\" : 8869.844, \"D9\" : 9397.273, \"D#9\" : 9956.063, \"E9\" : 10548.08,",
"55.000, \"A#1\" : 58.270, \"B1\" : 61.735, \"C2\" : 65.406, \"C#2\" : 69.296,",
"87.307, \"F#2\" : 92.499, \"G2\" : 97.999, \"G#2\" : 103.826, \"A2\" : 110.000,",
": 87.307, \"F#2\" : 92.499, \"G2\" : 97.999, \"G#2\" : 103.826, \"A2\" :",
": 2489.016, \"E7\" : 2637.020, \"F7\" : 2793.826, \"F#7\" : 2959.955, \"G7\" :",
": 38.891, \"E1\" : 41.203, \"F1\" : 43.654, \"F#1\" : 46.249, \"G1\" :",
"\"D1\" : 36.708, \"D#1\" : 38.891, \"E1\" : 41.203, \"F1\" : 43.654, \"F#1\"",
"\"E0\" : 20.602, \"F0\" : 21.827, \"F#0\" : 23.125, \"G0\" : 24.500, \"G#0\"",
": 20.602, \"F0\" : 21.827, \"F#0\" : 23.125, \"G0\" : 24.500, \"G#0\" :",
": 32.703, \"C#1\" : 34.648, \"D1\" : 36.708, \"D#1\" : 38.891, \"E1\" :",
": 92.499, \"G2\" : 97.999, \"G#2\" : 103.826, \"A2\" : 110.000, \"A#2\" :",
"\"C1\" : 32.703, \"C#1\" : 34.648, \"D1\" : 36.708, \"D#1\" : 38.891, \"E1\"",
"24.500, \"G#0\" : 25.957, \"A0\" : 27.500, \"A#0\" : 29.135, \"B0\" : 30.868,",
": 195.998, \"G#3\" : 207.652, \"A3\" : 220.000, \"A#3\" : 233.082, \"B3\" :",
"\"C#9\" : 8869.844, \"D9\" : 9397.273, \"D#9\" : 9956.063, \"E9\" : 10548.08, \"F9\"",
"\"E2\" : 82.407, \"F2\" : 87.307, \"F#2\" : 92.499, \"G2\" : 97.999, \"G#2\"",
": 138.591, \"D3\" : 146.832, \"D#3\" : 155.563, \"E3\" : 164.814, \"F3\" :",
"pitch_table = { \"C-1\" : 8.176, \"C#-1\": 8.662, \"D-1\" : 9.177, \"D#-1\": 9.723,",
"523.251, \"C#5\" : 554.365, \"D5\" : 587.330, \"D#5\" : 622.254, \"E5\" : 659.255,",
"\"A#3\" : 233.082, \"B3\" : 246.942, \"C4\" : 261.626, \"C#4\" : 277.183, \"D4\"",
": 43.654, \"F#1\" : 46.249, \"G1\" : 48.999, \"G#1\" : 51.913, \"A1\" :",
"\"E5\" : 659.255, \"F5\" : 698.456, \"F#5\" : 739.989, \"G5\" : 783.991, \"G#5\"",
": 440.000, \"A#4\" : 466.164, \"B4\" : 493.883, \"C5\" : 523.251, \"C#5\" :",
"3322.438, \"A7\" : 3520.000, \"A#7\" : 3729.310, \"B7\" : 3951.066, \"C8\" : 4186.009,",
": 9397.273, \"D#9\" : 9956.063, \"E9\" : 10548.08, \"F9\" : 11175.30, \"F#9\" :",
"932.328, \"B5\" : 987.767, \"C6\" : 1046.502, \"C#6\" : 1108.731, \"D6\" : 1174.659,",
": 1661.219, \"A6\" : 1760.000, \"A#6\" : 1864.655, \"B6\" : 1975.533, \"C7\" :",
"440.000, \"A#4\" : 466.164, \"B4\" : 493.883, \"C5\" : 523.251, \"C#5\" : 554.365,",
": 4698.636, \"D#8\" : 4978.032, \"E8\" : 5274.041, \"F8\" : 5587.652, \"F#8\" :",
"\"D2\" : 73.416, \"D#2\" : 77.782, \"E2\" : 82.407, \"F2\" : 87.307, \"F#2\"",
"\"F#2\" : 92.499, \"G2\" : 97.999, \"G#2\" : 103.826, \"A2\" : 110.000, \"A#2\"",
": 932.328, \"B5\" : 987.767, \"C6\" : 1046.502, \"C#6\" : 1108.731, \"D6\" :",
"41.203, \"F1\" : 43.654, \"F#1\" : 46.249, \"G1\" : 48.999, \"G#1\" : 51.913,",
"\"D#3\" : 155.563, \"E3\" : 164.814, \"F3\" : 174.614, \"F#3\" : 184.997, \"G3\"",
"1108.731, \"D6\" : 1174.659, \"D#6\" : 1244.508, \"E6\" : 1318.510, \"F6\" : 1396.913,",
"\"G#4\" : 415.305, \"A4\" : 440.000, \"A#4\" : 466.164, \"B4\" : 493.883, \"C5\"",
"\"F#4\" : 369.994, \"G4\" : 391.995, \"G#4\" : 415.305, \"A4\" : 440.000, \"A#4\"",
": 3520.000, \"A#7\" : 3729.310, \"B7\" : 3951.066, \"C8\" : 4186.009, \"C#8\" :",
": 29.135, \"B0\" : 30.868, \"C1\" : 32.703, \"C#1\" : 34.648, \"D1\" :",
"622.254, \"E5\" : 659.255, \"F5\" : 698.456, \"F#5\" : 739.989, \"G5\" : 783.991,",
"\"C#7\" : 2217.461, \"D7\" : 2349.318, \"D#7\" : 2489.016, \"E7\" : 2637.020, \"F7\"",
": 523.251, \"C#5\" : 554.365, \"D5\" : 587.330, \"D#5\" : 622.254, \"E5\" :",
"51.913, \"A1\" : 55.000, \"A#1\" : 58.270, \"B1\" : 61.735, \"C2\" : 65.406,",
"\"D3\" : 146.832, \"D#3\" : 155.563, \"E3\" : 164.814, \"F3\" : 174.614, \"F#3\"",
": 155.563, \"E3\" : 164.814, \"F3\" : 174.614, \"F#3\" : 184.997, \"G3\" :",
"9.723, \"E-1\" : 10.301, \"F-1\" : 10.913, \"F#-1\": 11.562, \"G-1\" : 12.250, \"G#-1\":",
": 698.456, \"F#5\" : 739.989, \"G5\" : 783.991, \"G#5\" : 830.609, \"A5\" :",
"9.177, \"D#-1\": 9.723, \"E-1\" : 10.301, \"F-1\" : 10.913, \"F#-1\": 11.562, \"G-1\" :",
": 349.228, \"F#4\" : 369.994, \"G4\" : 391.995, \"G#4\" : 415.305, \"A4\" :",
": 4434.922, \"D8\" : 4698.636, \"D#8\" : 4978.032, \"E8\" : 5274.041, \"F8\" :",
": 2217.461, \"D7\" : 2349.318, \"D#7\" : 2489.016, \"E7\" : 2637.020, \"F7\" :",
"\"F3\" : 174.614, \"F#3\" : 184.997, \"G3\" : 195.998, \"G#3\" : 207.652, \"A3\"",
"\"A6\" : 1760.000, \"A#6\" : 1864.655, \"B6\" : 1975.533, \"C7\" : 2093.005, \"C#7\"",
"77.782, \"E2\" : 82.407, \"F2\" : 87.307, \"F#2\" : 92.499, \"G2\" : 97.999,",
"\"F#8\" : 5919.911, \"G8\" : 6271.927, \"G#8\" : 6644.875, \"A8\" : 7040.000, \"A#8\"",
"\"D-1\" : 9.177, \"D#-1\": 9.723, \"E-1\" : 10.301, \"F-1\" : 10.913, \"F#-1\": 11.562,",
": 58.270, \"B1\" : 61.735, \"C2\" : 65.406, \"C#2\" : 69.296, \"D2\" :",
"\"A#5\" : 932.328, \"B5\" : 987.767, \"C6\" : 1046.502, \"C#6\" : 1108.731, \"D6\"",
"\"F#6\" : 1479.978, \"G6\" : 1567.982, \"G#6\" : 1661.219, \"A6\" : 1760.000, \"A#6\"",
"261.626, \"C#4\" : 277.183, \"D4\" : 293.665, \"D#4\" : 311.127, \"E4\" : 329.628,",
": 783.991, \"G#5\" : 830.609, \"A5\" : 880.000, \"A#5\" : 932.328, \"B5\" :",
": 8372.018, \"C#9\" : 8869.844, \"D9\" : 9397.273, \"D#9\" : 9956.063, \"E9\" :",
"1174.659, \"D#6\" : 1244.508, \"E6\" : 1318.510, \"F6\" : 1396.913, \"F#6\" : 1479.978,",
": 82.407, \"F2\" : 87.307, \"F#2\" : 92.499, \"G2\" : 97.999, \"G#2\" :",
"5274.041, \"F8\" : 5587.652, \"F#8\" : 5919.911, \"G8\" : 6271.927, \"G#8\" : 6644.875,",
"1661.219, \"A6\" : 1760.000, \"A#6\" : 1864.655, \"B6\" : 1975.533, \"C7\" : 2093.005,",
"\"B3\" : 246.942, \"C4\" : 261.626, \"C#4\" : 277.183, \"D4\" : 293.665, \"D#4\"",
"2217.461, \"D7\" : 2349.318, \"D#7\" : 2489.016, \"E7\" : 2637.020, \"F7\" : 2793.826,",
": 5274.041, \"F8\" : 5587.652, \"F#8\" : 5919.911, \"G8\" : 6271.927, \"G#8\" :",
"17.324, \"D0\" : 18.354, \"D#0\" : 19.445, \"E0\" : 20.602, \"F0\" : 21.827,",
"\"B7\" : 3951.066, \"C8\" : 4186.009, \"C#8\" : 4434.922, \"D8\" : 4698.636, \"D#8\"",
"\"D5\" : 587.330, \"D#5\" : 622.254, \"E5\" : 659.255, \"F5\" : 698.456, \"F#5\"",
": 103.826, \"A2\" : 110.000, \"A#2\" : 116.541, \"B2\" : 123.471, \"C3\" :",
"4434.922, \"D8\" : 4698.636, \"D#8\" : 4978.032, \"E8\" : 5274.041, \"F8\" : 5587.652,",
": 2793.826, \"F#7\" : 2959.955, \"G7\" : 3135.963, \"G#7\" : 3322.438, \"A7\" :",
"\"A5\" : 880.000, \"A#5\" : 932.328, \"B5\" : 987.767, \"C6\" : 1046.502, \"C#6\"",
": 51.913, \"A1\" : 55.000, \"A#1\" : 58.270, \"B1\" : 61.735, \"C2\" :",
": 24.500, \"G#0\" : 25.957, \"A0\" : 27.500, \"A#0\" : 29.135, \"B0\" :",
": 261.626, \"C#4\" : 277.183, \"D4\" : 293.665, \"D#4\" : 311.127, \"E4\" :",
"\"A#7\" : 3729.310, \"B7\" : 3951.066, \"C8\" : 4186.009, \"C#8\" : 4434.922, \"D8\"",
"\"C#6\" : 1108.731, \"D6\" : 1174.659, \"D#6\" : 1244.508, \"E6\" : 1318.510, \"F6\"",
": 46.249, \"G1\" : 48.999, \"G#1\" : 51.913, \"A1\" : 55.000, \"A#1\" :",
": 8.176, \"C#-1\": 8.662, \"D-1\" : 9.177, \"D#-1\": 9.723, \"E-1\" : 10.301, \"F-1\"",
"659.255, \"F5\" : 698.456, \"F#5\" : 739.989, \"G5\" : 783.991, \"G#5\" : 830.609,",
"\"E6\" : 1318.510, \"F6\" : 1396.913, \"F#6\" : 1479.978, \"G6\" : 1567.982, \"G#6\"",
": 987.767, \"C6\" : 1046.502, \"C#6\" : 1108.731, \"D6\" : 1174.659, \"D#6\" :",
"\"G-1\" : 12.250, \"G#-1\": 12.978, \"A-1\" : 13.750, \"A#-1\": 14.568, \"B-1\" : 15.434,",
"7040.000, \"A#8\" : 7458.620, \"B8\" : 7902.133, \"C9\" : 8372.018, \"C#9\" : 8869.844,",
"\"B1\" : 61.735, \"C2\" : 65.406, \"C#2\" : 69.296, \"D2\" : 73.416, \"D#2\"",
"\"C6\" : 1046.502, \"C#6\" : 1108.731, \"D6\" : 1174.659, \"D#6\" : 1244.508, \"E6\"",
"349.228, \"F#4\" : 369.994, \"G4\" : 391.995, \"G#4\" : 415.305, \"A4\" : 440.000,",
"\"D#5\" : 622.254, \"E5\" : 659.255, \"F5\" : 698.456, \"F#5\" : 739.989, \"G5\"",
": 48.999, \"G#1\" : 51.913, \"A1\" : 55.000, \"A#1\" : 58.270, \"B1\" :",
"7902.133, \"C9\" : 8372.018, \"C#9\" : 8869.844, \"D9\" : 9397.273, \"D#9\" : 9956.063,",
"\"F#3\" : 184.997, \"G3\" : 195.998, \"G#3\" : 207.652, \"A3\" : 220.000, \"A#3\"",
": 36.708, \"D#1\" : 38.891, \"E1\" : 41.203, \"F1\" : 43.654, \"F#1\" :",
"1244.508, \"E6\" : 1318.510, \"F6\" : 1396.913, \"F#6\" : 1479.978, \"G6\" : 1567.982,",
"1975.533, \"C7\" : 2093.005, \"C#7\" : 2217.461, \"D7\" : 2349.318, \"D#7\" : 2489.016,",
": 391.995, \"G#4\" : 415.305, \"A4\" : 440.000, \"A#4\" : 466.164, \"B4\" :",
"34.648, \"D1\" : 36.708, \"D#1\" : 38.891, \"E1\" : 41.203, \"F1\" : 43.654,",
"38.891, \"E1\" : 41.203, \"F1\" : 43.654, \"F#1\" : 46.249, \"G1\" : 48.999,",
": 5587.652, \"F#8\" : 5919.911, \"G8\" : 6271.927, \"G#8\" : 6644.875, \"A8\" :",
"4978.032, \"E8\" : 5274.041, \"F8\" : 5587.652, \"F#8\" : 5919.911, \"G8\" : 6271.927,",
"195.998, \"G#3\" : 207.652, \"A3\" : 220.000, \"A#3\" : 233.082, \"B3\" : 246.942,",
"\"D#7\" : 2489.016, \"E7\" : 2637.020, \"F7\" : 2793.826, \"F#7\" : 2959.955, \"G7\"",
": 30.868, \"C1\" : 32.703, \"C#1\" : 34.648, \"D1\" : 36.708, \"D#1\" :",
"\"A8\" : 7040.000, \"A#8\" : 7458.620, \"B8\" : 7902.133, \"C9\" : 8372.018, \"C#9\"",
": 233.082, \"B3\" : 246.942, \"C4\" : 261.626, \"C#4\" : 277.183, \"D4\" :",
"\"A#0\" : 29.135, \"B0\" : 30.868, \"C1\" : 32.703, \"C#1\" : 34.648, \"D1\"",
": 587.330, \"D#5\" : 622.254, \"E5\" : 659.255, \"F5\" : 698.456, \"F#5\" :",
"\"C#4\" : 277.183, \"D4\" : 293.665, \"D#4\" : 311.127, \"E4\" : 329.628, \"F4\"",
"\"B8\" : 7902.133, \"C9\" : 8372.018, \"C#9\" : 8869.844, \"D9\" : 9397.273, \"D#9\"",
": 5919.911, \"G8\" : 6271.927, \"G#8\" : 6644.875, \"A8\" : 7040.000, \"A#8\" :",
": 17.324, \"D0\" : 18.354, \"D#0\" : 19.445, \"E0\" : 20.602, \"F0\" :",
"\"G2\" : 97.999, \"G#2\" : 103.826, \"A2\" : 110.000, \"A#2\" : 116.541, \"B2\"",
"\"F#1\" : 46.249, \"G1\" : 48.999, \"G#1\" : 51.913, \"A1\" : 55.000, \"A#1\"",
"\"C#2\" : 69.296, \"D2\" : 73.416, \"D#2\" : 77.782, \"E2\" : 82.407, \"F2\"",
": 7902.133, \"C9\" : 8372.018, \"C#9\" : 8869.844, \"D9\" : 9397.273, \"D#9\" :",
"46.249, \"G1\" : 48.999, \"G#1\" : 51.913, \"A1\" : 55.000, \"A#1\" : 58.270,",
"4186.009, \"C#8\" : 4434.922, \"D8\" : 4698.636, \"D#8\" : 4978.032, \"E8\" : 5274.041,",
"246.942, \"C4\" : 261.626, \"C#4\" : 277.183, \"D4\" : 293.665, \"D#4\" : 311.127,",
"8.176, \"C#-1\": 8.662, \"D-1\" : 9.177, \"D#-1\": 9.723, \"E-1\" : 10.301, \"F-1\" :",
": 16.352, \"C#0\" : 17.324, \"D0\" : 18.354, \"D#0\" : 19.445, \"E0\" :",
"110.000, \"A#2\" : 116.541, \"B2\" : 123.471, \"C3\" : 130.813, \"C#3\" : 138.591,",
": 739.989, \"G5\" : 783.991, \"G#5\" : 830.609, \"A5\" : 880.000, \"A#5\" :",
": 18.354, \"D#0\" : 19.445, \"E0\" : 20.602, \"F0\" : 21.827, \"F#0\" :",
"\"F#5\" : 739.989, \"G5\" : 783.991, \"G#5\" : 830.609, \"A5\" : 880.000, \"A#5\"",
": 415.305, \"A4\" : 440.000, \"A#4\" : 466.164, \"B4\" : 493.883, \"C5\" :",
"1760.000, \"A#6\" : 1864.655, \"B6\" : 1975.533, \"C7\" : 2093.005, \"C#7\" : 2217.461,",
": 1479.978, \"G6\" : 1567.982, \"G#6\" : 1661.219, \"A6\" : 1760.000, \"A#6\" :",
": 1567.982, \"G#6\" : 1661.219, \"A6\" : 1760.000, \"A#6\" : 1864.655, \"B6\" :",
"8.662, \"D-1\" : 9.177, \"D#-1\": 9.723, \"E-1\" : 10.301, \"F-1\" : 10.913, \"F#-1\":",
"739.989, \"G5\" : 783.991, \"G#5\" : 830.609, \"A5\" : 880.000, \"A#5\" : 932.328,",
": 207.652, \"A3\" : 220.000, \"A#3\" : 233.082, \"B3\" : 246.942, \"C4\" :",
"\"F6\" : 1396.913, \"F#6\" : 1479.978, \"G6\" : 1567.982, \"G#6\" : 1661.219, \"A6\"",
": 311.127, \"E4\" : 329.628, \"F4\" : 349.228, \"F#4\" : 369.994, \"G4\" :",
": 34.648, \"D1\" : 36.708, \"D#1\" : 38.891, \"E1\" : 41.203, \"F1\" :",
": 4186.009, \"C#8\" : 4434.922, \"D8\" : 4698.636, \"D#8\" : 4978.032, \"E8\" :",
"329.628, \"F4\" : 349.228, \"F#4\" : 369.994, \"G4\" : 391.995, \"G#4\" : 415.305,",
"27.500, \"A#0\" : 29.135, \"B0\" : 30.868, \"C1\" : 32.703, \"C#1\" : 34.648,",
": 2959.955, \"G7\" : 3135.963, \"G#7\" : 3322.438, \"A7\" : 3520.000, \"A#7\" :",
"\"C#3\" : 138.591, \"D3\" : 146.832, \"D#3\" : 155.563, \"E3\" : 164.814, \"F3\"",
"\"A#-1\": 14.568, \"B-1\" : 15.434, \"C0\" : 16.352, \"C#0\" : 17.324, \"D0\" :",
"\"E4\" : 329.628, \"F4\" : 349.228, \"F#4\" : 369.994, \"G4\" : 391.995, \"G#4\"",
"\"B2\" : 123.471, \"C3\" : 130.813, \"C#3\" : 138.591, \"D3\" : 146.832, \"D#3\"",
": 7458.620, \"B8\" : 7902.133, \"C9\" : 8372.018, \"C#9\" : 8869.844, \"D9\" :",
"\"F2\" : 87.307, \"F#2\" : 92.499, \"G2\" : 97.999, \"G#2\" : 103.826, \"A2\"",
"\"G#8\" : 6644.875, \"A8\" : 7040.000, \"A#8\" : 7458.620, \"B8\" : 7902.133, \"C9\"",
"43.654, \"F#1\" : 46.249, \"G1\" : 48.999, \"G#1\" : 51.913, \"A1\" : 55.000,",
"\"A4\" : 440.000, \"A#4\" : 466.164, \"B4\" : 493.883, \"C5\" : 523.251, \"C#5\"",
": 8869.844, \"D9\" : 9397.273, \"D#9\" : 9956.063, \"E9\" : 10548.08, \"F9\" :",
"\"G#-1\": 12.978, \"A-1\" : 13.750, \"A#-1\": 14.568, \"B-1\" : 15.434, \"C0\" : 16.352,",
"311.127, \"E4\" : 329.628, \"F4\" : 349.228, \"F#4\" : 369.994, \"G4\" : 391.995,",
"3729.310, \"B7\" : 3951.066, \"C8\" : 4186.009, \"C#8\" : 4434.922, \"D8\" : 4698.636,",
"\"F#7\" : 2959.955, \"G7\" : 3135.963, \"G#7\" : 3322.438, \"A7\" : 3520.000, \"A#7\"",
"\"G6\" : 1567.982, \"G#6\" : 1661.219, \"A6\" : 1760.000, \"A#6\" : 1864.655, \"B6\"",
": 116.541, \"B2\" : 123.471, \"C3\" : 130.813, \"C#3\" : 138.591, \"D3\" :",
"2959.955, \"G7\" : 3135.963, \"G#7\" : 3322.438, \"A7\" : 3520.000, \"A#7\" : 3729.310,",
"\"C#8\" : 4434.922, \"D8\" : 4698.636, \"D#8\" : 4978.032, \"E8\" : 5274.041, \"F8\"",
"\"G#2\" : 103.826, \"A2\" : 110.000, \"A#2\" : 116.541, \"B2\" : 123.471, \"C3\"",
"97.999, \"G#2\" : 103.826, \"A2\" : 110.000, \"A#2\" : 116.541, \"B2\" : 123.471,",
"12.250, \"G#-1\": 12.978, \"A-1\" : 13.750, \"A#-1\": 14.568, \"B-1\" : 15.434, \"C0\" :",
"\"C-1\" : 8.176, \"C#-1\": 8.662, \"D-1\" : 9.177, \"D#-1\": 9.723, \"E-1\" : 10.301,",
"\"D4\" : 293.665, \"D#4\" : 311.127, \"E4\" : 329.628, \"F4\" : 349.228, \"F#4\"",
"3135.963, \"G#7\" : 3322.438, \"A7\" : 3520.000, \"A#7\" : 3729.310, \"B7\" : 3951.066,",
"\"C5\" : 523.251, \"C#5\" : 554.365, \"D5\" : 587.330, \"D#5\" : 622.254, \"E5\"",
"2489.016, \"E7\" : 2637.020, \"F7\" : 2793.826, \"F#7\" : 2959.955, \"G7\" : 3135.963,",
"\"D7\" : 2349.318, \"D#7\" : 2489.016, \"E7\" : 2637.020, \"F7\" : 2793.826, \"F#7\"",
"146.832, \"D#3\" : 155.563, \"E3\" : 164.814, \"F3\" : 174.614, \"F#3\" : 184.997,",
"\"D#6\" : 1244.508, \"E6\" : 1318.510, \"F6\" : 1396.913, \"F#6\" : 1479.978, \"G6\"",
"20.602, \"F0\" : 21.827, \"F#0\" : 23.125, \"G0\" : 24.500, \"G#0\" : 25.957,",
": 493.883, \"C5\" : 523.251, \"C#5\" : 554.365, \"D5\" : 587.330, \"D#5\" :",
"\"B5\" : 987.767, \"C6\" : 1046.502, \"C#6\" : 1108.731, \"D6\" : 1174.659, \"D#6\"",
"\"D#9\" : 9956.063, \"E9\" : 10548.08, \"F9\" : 11175.30, \"F#9\" : 11839.82, \"G9\"",
"13.750, \"A#-1\": 14.568, \"B-1\" : 15.434, \"C0\" : 16.352, \"C#0\" : 17.324, \"D0\"",
"\"C8\" : 4186.009, \"C#8\" : 4434.922, \"D8\" : 4698.636, \"D#8\" : 4978.032, \"E8\""
] |
[
"implemented to use in configargparser in mind. To use it, call for example:",
"be called to replace an \"open\" call and write to memory file. File",
"exception of not closing the memory file on exit of a \"with\" statement",
"call and write to memory file. File content is then \"\"\" def __init__(self):",
"that can be written like a file but does not close at the",
"a \"with\" statement. Objects can be retrieved with the virtual path. This was",
"get an StringIO objects that can be written like a file but does",
"# Holds the config data \"\"\" import io class NotWithCloseStringIO(io.StringIO): \"\"\"This class is",
"__init__(self): self._string_ios = {} def __call__(self, virt_path, bla): self._string_ios[virt_path] = NotWithCloseStringIO() return self.string_ios[virt_path]",
"= NotWithCloseStringIO() return self.string_ios[virt_path] def __get__(self, key): return self._string_ios[key] def close(self): for key",
"like a file but does not close at the end of a \"with\"",
"mind. To use it, call for example: holder = StringIOHolder() parser = configargparse.ArgumentParser(description=\"Parser",
"file but does not close at the end of a \"with\" statement. Objects",
"\"open\" call and write to memory file. File content is then \"\"\" def",
"[\"virt_path\"]) holder[\"virt_path\"].getvalue() # Holds the config data \"\"\" import io class NotWithCloseStringIO(io.StringIO): \"\"\"This",
"\"\"\"Holds NotWithCloseStringIO objects and can be called to replace an \"open\" call and",
"data \"\"\" import io class NotWithCloseStringIO(io.StringIO): \"\"\"This class is just the normal StringIO",
"objects that can be written like a file but does not close at",
"at the end of a \"with\" statement. Objects can be retrieved with the",
"utils can be used to get an StringIO objects that can be written",
"= {} def __call__(self, virt_path, bla): self._string_ios[virt_path] = NotWithCloseStringIO() return self.string_ios[virt_path] def __get__(self,",
"then \"\"\" def __init__(self): self._string_ios = {} def __call__(self, virt_path, bla): self._string_ios[virt_path] =",
"to replace an \"open\" call and write to memory file. File content is",
"class is just the normal StringIO with the exception of not closing the",
"just the normal StringIO with the exception of not closing the memory file",
"the memory file on exit of a \"with\" statement \"\"\" def __exit__(self, type,",
"{} def __call__(self, virt_path, bla): self._string_ios[virt_path] = NotWithCloseStringIO() return self.string_ios[virt_path] def __get__(self, key):",
"in mind. To use it, call for example: holder = StringIOHolder() parser =",
"replace an \"open\" call and write to memory file. File content is then",
"class StringIOHolder(): \"\"\"Holds NotWithCloseStringIO objects and can be called to replace an \"open\"",
"\"with\" statement. Objects can be retrieved with the virtual path. This was implemented",
"parser.parse_args() parser.write_config_file(parsed, [\"virt_path\"]) holder[\"virt_path\"].getvalue() # Holds the config data \"\"\" import io class",
"is just the normal StringIO with the exception of not closing the memory",
"the normal StringIO with the exception of not closing the memory file on",
"the exception of not closing the memory file on exit of a \"with\"",
"to use in configargparser in mind. To use it, call for example: holder",
"This was implemented to use in configargparser in mind. To use it, call",
"holder = StringIOHolder() parser = configargparse.ArgumentParser(description=\"Parser for the instance evolver\", config_file_open_func=holder) ... parsed",
"StringIO with the exception of not closing the memory file on exit of",
"type, value, traceback): pass class StringIOHolder(): \"\"\"Holds NotWithCloseStringIO objects and can be called",
"used to get an StringIO objects that can be written like a file",
"File content is then \"\"\" def __init__(self): self._string_ios = {} def __call__(self, virt_path,",
"be written like a file but does not close at the end of",
"return self.string_ios[virt_path] def __get__(self, key): return self._string_ios[key] def close(self): for key in self._string_ios:",
"write to memory file. File content is then \"\"\" def __init__(self): self._string_ios =",
"import io class NotWithCloseStringIO(io.StringIO): \"\"\"This class is just the normal StringIO with the",
"can be retrieved with the virtual path. This was implemented to use in",
"= parser.parse_args() parser.write_config_file(parsed, [\"virt_path\"]) holder[\"virt_path\"].getvalue() # Holds the config data \"\"\" import io",
"NotWithCloseStringIO objects and can be called to replace an \"open\" call and write",
"parser = configargparse.ArgumentParser(description=\"Parser for the instance evolver\", config_file_open_func=holder) ... parsed = parser.parse_args() parser.write_config_file(parsed,",
"an \"open\" call and write to memory file. File content is then \"\"\"",
"class NotWithCloseStringIO(io.StringIO): \"\"\"This class is just the normal StringIO with the exception of",
"does not close at the end of a \"with\" statement. Objects can be",
"for example: holder = StringIOHolder() parser = configargparse.ArgumentParser(description=\"Parser for the instance evolver\", config_file_open_func=holder)",
"and write to memory file. File content is then \"\"\" def __init__(self): self._string_ios",
"end of a \"with\" statement. Objects can be retrieved with the virtual path.",
"of a \"with\" statement \"\"\" def __exit__(self, type, value, traceback): pass class StringIOHolder():",
"config_file_open_func=holder) ... parsed = parser.parse_args() parser.write_config_file(parsed, [\"virt_path\"]) holder[\"virt_path\"].getvalue() # Holds the config data",
"to memory file. File content is then \"\"\" def __init__(self): self._string_ios = {}",
"file. File content is then \"\"\" def __init__(self): self._string_ios = {} def __call__(self,",
"= configargparse.ArgumentParser(description=\"Parser for the instance evolver\", config_file_open_func=holder) ... parsed = parser.parse_args() parser.write_config_file(parsed, [\"virt_path\"])",
"virtual path. This was implemented to use in configargparser in mind. To use",
"objects and can be called to replace an \"open\" call and write to",
"use it, call for example: holder = StringIOHolder() parser = configargparse.ArgumentParser(description=\"Parser for the",
"self._string_ios = {} def __call__(self, virt_path, bla): self._string_ios[virt_path] = NotWithCloseStringIO() return self.string_ios[virt_path] def",
"not closing the memory file on exit of a \"with\" statement \"\"\" def",
"path. This was implemented to use in configargparser in mind. To use it,",
"parsed = parser.parse_args() parser.write_config_file(parsed, [\"virt_path\"]) holder[\"virt_path\"].getvalue() # Holds the config data \"\"\" import",
"file on exit of a \"with\" statement \"\"\" def __exit__(self, type, value, traceback):",
"use in configargparser in mind. To use it, call for example: holder =",
"exit of a \"with\" statement \"\"\" def __exit__(self, type, value, traceback): pass class",
"__call__(self, virt_path, bla): self._string_ios[virt_path] = NotWithCloseStringIO() return self.string_ios[virt_path] def __get__(self, key): return self._string_ios[key]",
"evolver\", config_file_open_func=holder) ... parsed = parser.parse_args() parser.write_config_file(parsed, [\"virt_path\"]) holder[\"virt_path\"].getvalue() # Holds the config",
"def __call__(self, virt_path, bla): self._string_ios[virt_path] = NotWithCloseStringIO() return self.string_ios[virt_path] def __get__(self, key): return",
"be retrieved with the virtual path. This was implemented to use in configargparser",
"is then \"\"\" def __init__(self): self._string_ios = {} def __call__(self, virt_path, bla): self._string_ios[virt_path]",
"StringIOHolder(): \"\"\"Holds NotWithCloseStringIO objects and can be called to replace an \"open\" call",
"self._string_ios[virt_path] = NotWithCloseStringIO() return self.string_ios[virt_path] def __get__(self, key): return self._string_ios[key] def close(self): for",
"normal StringIO with the exception of not closing the memory file on exit",
"the config data \"\"\" import io class NotWithCloseStringIO(io.StringIO): \"\"\"This class is just the",
"def __exit__(self, type, value, traceback): pass class StringIOHolder(): \"\"\"Holds NotWithCloseStringIO objects and can",
"was implemented to use in configargparser in mind. To use it, call for",
"\"\"\"These utils can be used to get an StringIO objects that can be",
"StringIOHolder() parser = configargparse.ArgumentParser(description=\"Parser for the instance evolver\", config_file_open_func=holder) ... parsed = parser.parse_args()",
"virt_path, bla): self._string_ios[virt_path] = NotWithCloseStringIO() return self.string_ios[virt_path] def __get__(self, key): return self._string_ios[key] def",
"the end of a \"with\" statement. Objects can be retrieved with the virtual",
"on exit of a \"with\" statement \"\"\" def __exit__(self, type, value, traceback): pass",
"of not closing the memory file on exit of a \"with\" statement \"\"\"",
"Objects can be retrieved with the virtual path. This was implemented to use",
"statement. Objects can be retrieved with the virtual path. This was implemented to",
"closing the memory file on exit of a \"with\" statement \"\"\" def __exit__(self,",
"can be used to get an StringIO objects that can be written like",
"it, call for example: holder = StringIOHolder() parser = configargparse.ArgumentParser(description=\"Parser for the instance",
"bla): self._string_ios[virt_path] = NotWithCloseStringIO() return self.string_ios[virt_path] def __get__(self, key): return self._string_ios[key] def close(self):",
"call for example: holder = StringIOHolder() parser = configargparse.ArgumentParser(description=\"Parser for the instance evolver\",",
"configargparser in mind. To use it, call for example: holder = StringIOHolder() parser",
"= StringIOHolder() parser = configargparse.ArgumentParser(description=\"Parser for the instance evolver\", config_file_open_func=holder) ... parsed =",
"\"\"\" def __exit__(self, type, value, traceback): pass class StringIOHolder(): \"\"\"Holds NotWithCloseStringIO objects and",
"in configargparser in mind. To use it, call for example: holder = StringIOHolder()",
"pass class StringIOHolder(): \"\"\"Holds NotWithCloseStringIO objects and can be called to replace an",
"of a \"with\" statement. Objects can be retrieved with the virtual path. This",
"but does not close at the end of a \"with\" statement. Objects can",
"with the virtual path. This was implemented to use in configargparser in mind.",
"content is then \"\"\" def __init__(self): self._string_ios = {} def __call__(self, virt_path, bla):",
"a file but does not close at the end of a \"with\" statement.",
"holder[\"virt_path\"].getvalue() # Holds the config data \"\"\" import io class NotWithCloseStringIO(io.StringIO): \"\"\"This class",
"def __init__(self): self._string_ios = {} def __call__(self, virt_path, bla): self._string_ios[virt_path] = NotWithCloseStringIO() return",
"parser.write_config_file(parsed, [\"virt_path\"]) holder[\"virt_path\"].getvalue() # Holds the config data \"\"\" import io class NotWithCloseStringIO(io.StringIO):",
"__exit__(self, type, value, traceback): pass class StringIOHolder(): \"\"\"Holds NotWithCloseStringIO objects and can be",
"\"\"\" def __init__(self): self._string_ios = {} def __call__(self, virt_path, bla): self._string_ios[virt_path] = NotWithCloseStringIO()",
"To use it, call for example: holder = StringIOHolder() parser = configargparse.ArgumentParser(description=\"Parser for",
"retrieved with the virtual path. This was implemented to use in configargparser in",
"Holds the config data \"\"\" import io class NotWithCloseStringIO(io.StringIO): \"\"\"This class is just",
"statement \"\"\" def __exit__(self, type, value, traceback): pass class StringIOHolder(): \"\"\"Holds NotWithCloseStringIO objects",
"\"\"\"This class is just the normal StringIO with the exception of not closing",
"to get an StringIO objects that can be written like a file but",
"StringIO objects that can be written like a file but does not close",
"traceback): pass class StringIOHolder(): \"\"\"Holds NotWithCloseStringIO objects and can be called to replace",
"called to replace an \"open\" call and write to memory file. File content",
"with the exception of not closing the memory file on exit of a",
"instance evolver\", config_file_open_func=holder) ... parsed = parser.parse_args() parser.write_config_file(parsed, [\"virt_path\"]) holder[\"virt_path\"].getvalue() # Holds the",
"config data \"\"\" import io class NotWithCloseStringIO(io.StringIO): \"\"\"This class is just the normal",
"written like a file but does not close at the end of a",
"the instance evolver\", config_file_open_func=holder) ... parsed = parser.parse_args() parser.write_config_file(parsed, [\"virt_path\"]) holder[\"virt_path\"].getvalue() # Holds",
"configargparse.ArgumentParser(description=\"Parser for the instance evolver\", config_file_open_func=holder) ... parsed = parser.parse_args() parser.write_config_file(parsed, [\"virt_path\"]) holder[\"virt_path\"].getvalue()",
"memory file on exit of a \"with\" statement \"\"\" def __exit__(self, type, value,",
"value, traceback): pass class StringIOHolder(): \"\"\"Holds NotWithCloseStringIO objects and can be called to",
"be used to get an StringIO objects that can be written like a",
"not close at the end of a \"with\" statement. Objects can be retrieved",
"self.string_ios[virt_path] def __get__(self, key): return self._string_ios[key] def close(self): for key in self._string_ios: self._string_ios[key].close()",
"can be called to replace an \"open\" call and write to memory file.",
"\"with\" statement \"\"\" def __exit__(self, type, value, traceback): pass class StringIOHolder(): \"\"\"Holds NotWithCloseStringIO",
"can be written like a file but does not close at the end",
"a \"with\" statement \"\"\" def __exit__(self, type, value, traceback): pass class StringIOHolder(): \"\"\"Holds",
"NotWithCloseStringIO(io.StringIO): \"\"\"This class is just the normal StringIO with the exception of not",
"... parsed = parser.parse_args() parser.write_config_file(parsed, [\"virt_path\"]) holder[\"virt_path\"].getvalue() # Holds the config data \"\"\"",
"for the instance evolver\", config_file_open_func=holder) ... parsed = parser.parse_args() parser.write_config_file(parsed, [\"virt_path\"]) holder[\"virt_path\"].getvalue() #",
"the virtual path. This was implemented to use in configargparser in mind. To",
"and can be called to replace an \"open\" call and write to memory",
"an StringIO objects that can be written like a file but does not",
"example: holder = StringIOHolder() parser = configargparse.ArgumentParser(description=\"Parser for the instance evolver\", config_file_open_func=holder) ...",
"close at the end of a \"with\" statement. Objects can be retrieved with",
"memory file. File content is then \"\"\" def __init__(self): self._string_ios = {} def",
"io class NotWithCloseStringIO(io.StringIO): \"\"\"This class is just the normal StringIO with the exception",
"\"\"\" import io class NotWithCloseStringIO(io.StringIO): \"\"\"This class is just the normal StringIO with",
"NotWithCloseStringIO() return self.string_ios[virt_path] def __get__(self, key): return self._string_ios[key] def close(self): for key in"
] |
[
"test_input, expected_wareki, expected_other): wareki, other = tmu_object.get_wareki(test_input) assert wareki == expected_wareki assert other",
"(\"start_No_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone(",
"= tmu_object.get_ad_default_year_dt_fmt(4, 3) assert datetime_format == datetime.datetime( 2020, 4, 3, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(0,",
"start, need_day, expected): print(pattern) result = tmu_object.create_dt_dict( end, start=start, need_day=need_day) assert result ==",
"import pytest from src.lib.time_util import TimeUtil import datetime @pytest.fixture(scope=\"module\", autouse=True) def tmu_object(): tmu",
"0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\",",
"tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\":",
"\"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]), (\"NO_start_needDay\", datetime.datetime( 2020, 3, 2), None, True, [{\"日付\":",
"] ) def test_create_dt_dict(self, tmu_object, pattern, end, start, need_day, expected): print(pattern) result =",
"2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')),",
"test_convert_wareki_to_ad(self, tmu_object): result = tmu_object.convert_wareki_to_ad('令和2年10月23日') assert result == \"2020-10-23T00:00:00+09:00\" def test_convert_wareki_to_ad_error(self, tmu_object): with",
"== expected_other def test_get_ymd_int_each(self, tmu_object): result = tmu_object.get_ymd_int_each('2年3月9日') assert result == [2, 3,",
"tmu_object): result = tmu_object.get_ymd_int_each('3月1日', need_year=False) assert result == [3, 1] def test_parse_date_span(self, tmu_object):",
"def test_convert_wareki_to_ad_error(self, tmu_object): with pytest.raises(ValueError): tmu_object.convert_wareki_to_ad('大正2年10月23日') @pytest.mark.parametrize( \"pattern, end, start, need_day, expected\", [",
"other = tmu_object.get_wareki(test_input) assert wareki == expected_wareki assert other == expected_other def test_get_ymd_int_each(self,",
"2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), True, [{\"日付\":",
"0, \"day\": 2}]) ] ) def test_create_dt_dict(self, tmu_object, pattern, end, start, need_day, expected):",
"'2年10月31日' ), ('平成30年1月31日', '平成', '30年1月31日'), ('大正1年1月31日', None, None)]) def test_get_wareki(self, tmu_object, test_input, expected_wareki,",
"= \"3月1日~ \\n3月2日\" result = tmu_object.get_dt_dict_from_text(target_char) assert result == [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1,",
"= tmu_object.get_dt_dict_from_text(target_char) assert result == [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\",",
"result == [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\":",
"\"test1~ \\ntest2\" result = tmu_object.parse_date_span(target_char) assert result == [\"test1\", \"test2\"] def test_get_ad_dt_fmt(self, tmu_object):",
"tmu_object): result = tmu_object.get_ymd_int_each('2年3月9日') assert result == [2, 3, 9] def test_get_ymd_int_each_2020(self, tmu_object):",
"== \"2020-04-29T00:00:00+09:00\" def test_get_ad_date_iso_fmt(self, tmu_object): iso_format = tmu_object.get_ad_date_iso_fmt(4, 3) assert iso_format == \"2020-04-03T00:00:00+09:00\"",
"def test_create_dt_dict(self, tmu_object, pattern, end, start, need_day, expected): print(pattern) result = tmu_object.create_dt_dict( end,",
"== \"2020-04-03T00:00:00+09:00\" def test_get_ad_default_year_dt_fmt(self, tmu_object): datetime_format = tmu_object.get_ad_default_year_dt_fmt(4, 3) assert datetime_format == datetime.datetime(",
"\"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}] if __name__ == '__main__': pytest.main(['-v',",
"def test_get_ad_dt_fmt(self, tmu_object): iso_format = tmu_object.get_ad_dt_fmt('令和', 2, 4, 29) assert iso_format == \"2020-04-29T00:00:00+09:00\"",
"pytest from src.lib.time_util import TimeUtil import datetime @pytest.fixture(scope=\"module\", autouse=True) def tmu_object(): tmu =",
"'平成', '30年1月31日'), ('大正1年1月31日', None, None)]) def test_get_wareki(self, tmu_object, test_input, expected_wareki, expected_other): wareki, other",
"\"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_No_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0,",
"datetime.datetime( 2020, 3, 2), None, True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\":",
"== [2, 3, 9] def test_get_ymd_int_each_2020(self, tmu_object): result = tmu_object.get_ymd_int_each('3月1日', need_year=False) assert result",
"tmu class TestTimeUtil: @pytest.mark.parametrize(\"test_input, expected_wareki, expected_other\", [( '令和2年10月31日', '令和', '2年10月31日' ), ('平成30年1月31日', '平成',",
"tmu_object): with pytest.raises(ValueError): tmu_object.convert_wareki_to_ad('大正2年10月23日') @pytest.mark.parametrize( \"pattern, end, start, need_day, expected\", [ (\"No_start_No_needDay\", datetime.datetime(2020,",
"0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]),",
"0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0},",
"[{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]) ]",
"\"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]), (\"NO_start_needDay\", datetime.datetime( 2020,",
"end, start, need_day, expected\", [ (\"No_start_No_needDay\", datetime.datetime(2020, 3, 2), None, False, [{\"日付\": \"2020-03-01T00:00:00+09:00\",",
"== expected def test_get_dt_dict_from_text(self, tmu_object): target_char = \"3月1日~ \\n3月2日\" result = tmu_object.get_dt_dict_from_text(target_char) assert",
"assert result == [2, 3, 9] def test_get_ymd_int_each_2020(self, tmu_object): result = tmu_object.get_ymd_int_each('3月1日', need_year=False)",
"\"day\": 2}]) ] ) def test_create_dt_dict(self, tmu_object, pattern, end, start, need_day, expected): print(pattern)",
"3, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST')) def test_convert_wareki_to_ad(self, tmu_object): result = tmu_object.convert_wareki_to_ad('令和2年10月23日') assert",
"assert other == expected_other def test_get_ymd_int_each(self, tmu_object): result = tmu_object.get_ymd_int_each('2年3月9日') assert result ==",
"need_year=False) assert result == [3, 1] def test_parse_date_span(self, tmu_object): target_char = \"test1~ \\ntest2\"",
"tmu_object.create_dt_dict( end, start=start, need_day=need_day) assert result == expected def test_get_dt_dict_from_text(self, tmu_object): target_char =",
"assert result == \"2020-10-23T00:00:00+09:00\" def test_convert_wareki_to_ad_error(self, tmu_object): with pytest.raises(ValueError): tmu_object.convert_wareki_to_ad('大正2年10月23日') @pytest.mark.parametrize( \"pattern, end,",
"start=start, need_day=need_day) assert result == expected def test_get_dt_dict_from_text(self, tmu_object): target_char = \"3月1日~ \\n3月2日\"",
"0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]), (\"NO_start_needDay\", datetime.datetime( 2020, 3, 2), None,",
"'令和2年10月31日', '令和', '2年10月31日' ), ('平成30年1月31日', '平成', '30年1月31日'), ('大正1年1月31日', None, None)]) def test_get_wareki(self, tmu_object,",
"tmu_object.get_ad_dt_fmt('令和', 2, 4, 29) assert iso_format == \"2020-04-29T00:00:00+09:00\" def test_get_ad_date_iso_fmt(self, tmu_object): iso_format =",
"test_get_dt_dict_from_text(self, tmu_object): target_char = \"3月1日~ \\n3月2日\" result = tmu_object.get_dt_dict_from_text(target_char) assert result == [{\"日付\":",
"result = tmu_object.get_dt_dict_from_text(target_char) assert result == [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\":",
"datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), False, [{\"日付\": \"2020-03-01T00:00:00+09:00\",",
"\"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]) ] )",
"False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_needDay\", datetime.datetime( 2020, 3,",
"expected_wareki, expected_other): wareki, other = tmu_object.get_wareki(test_input) assert wareki == expected_wareki assert other ==",
"tmu_object.get_ad_default_year_dt_fmt(4, 3) assert datetime_format == datetime.datetime( 2020, 4, 3, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400),",
"datetime.datetime(2020, 3, 2), None, False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]),",
"(\"No_start_No_needDay\", datetime.datetime(2020, 3, 2), None, False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\":",
"class TestTimeUtil: @pytest.mark.parametrize(\"test_input, expected_wareki, expected_other\", [( '令和2年10月31日', '令和', '2年10月31日' ), ('平成30年1月31日', '平成', '30年1月31日'),",
"0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST')) def test_convert_wareki_to_ad(self, tmu_object): result = tmu_object.convert_wareki_to_ad('令和2年10月23日') assert result ==",
"0, \"day\": 2}]), (\"NO_start_needDay\", datetime.datetime( 2020, 3, 2), None, True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\":",
"datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9),",
"end, start, need_day, expected): print(pattern) result = tmu_object.create_dt_dict( end, start=start, need_day=need_day) assert result",
"[( '令和2年10月31日', '令和', '2年10月31日' ), ('平成30年1月31日', '平成', '30年1月31日'), ('大正1年1月31日', None, None)]) def test_get_wareki(self,",
"test_get_ymd_int_each(self, tmu_object): result = tmu_object.get_ymd_int_each('2年3月9日') assert result == [2, 3, 9] def test_get_ymd_int_each_2020(self,",
"\\ntest2\" result = tmu_object.parse_date_span(target_char) assert result == [\"test1\", \"test2\"] def test_get_ad_dt_fmt(self, tmu_object): iso_format",
"end, start=start, need_day=need_day) assert result == expected def test_get_dt_dict_from_text(self, tmu_object): target_char = \"3月1日~",
"result == expected def test_get_dt_dict_from_text(self, tmu_object): target_char = \"3月1日~ \\n3月2日\" result = tmu_object.get_dt_dict_from_text(target_char)",
"[\"test1\", \"test2\"] def test_get_ad_dt_fmt(self, tmu_object): iso_format = tmu_object.get_ad_dt_fmt('令和', 2, 4, 29) assert iso_format",
"tmu_object): target_char = \"3月1日~ \\n3月2日\" result = tmu_object.get_dt_dict_from_text(target_char) assert result == [{\"日付\": \"2020-03-01T00:00:00+09:00\",",
"0}]), (\"start_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0,",
"\"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}] if __name__",
"= tmu_object.convert_wareki_to_ad('令和2年10月23日') assert result == \"2020-10-23T00:00:00+09:00\" def test_convert_wareki_to_ad_error(self, tmu_object): with pytest.raises(ValueError): tmu_object.convert_wareki_to_ad('大正2年10月23日') @pytest.mark.parametrize(",
"\"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]) ] ) def",
"assert datetime_format == datetime.datetime( 2020, 4, 3, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST')) def",
"result == \"2020-10-23T00:00:00+09:00\" def test_convert_wareki_to_ad_error(self, tmu_object): with pytest.raises(ValueError): tmu_object.convert_wareki_to_ad('大正2年10月23日') @pytest.mark.parametrize( \"pattern, end, start,",
"2}]), (\"NO_start_needDay\", datetime.datetime( 2020, 3, 2), None, True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\":",
"== expected_wareki assert other == expected_other def test_get_ymd_int_each(self, tmu_object): result = tmu_object.get_ymd_int_each('2年3月9日') assert",
"\"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}] if __name__ ==",
"pytest.raises(ValueError): tmu_object.convert_wareki_to_ad('大正2年10月23日') @pytest.mark.parametrize( \"pattern, end, start, need_day, expected\", [ (\"No_start_No_needDay\", datetime.datetime(2020, 3, 2),",
"True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}])",
"assert result == [\"test1\", \"test2\"] def test_get_ad_dt_fmt(self, tmu_object): iso_format = tmu_object.get_ad_dt_fmt('令和', 2, 4,",
"datetime.timedelta(hours=9), name='JST')), True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0,",
"result = tmu_object.create_dt_dict( end, start=start, need_day=need_day) assert result == expected def test_get_dt_dict_from_text(self, tmu_object):",
"need_day, expected): print(pattern) result = tmu_object.create_dt_dict( end, start=start, need_day=need_day) assert result == expected",
"print(pattern) result = tmu_object.create_dt_dict( end, start=start, need_day=need_day) assert result == expected def test_get_dt_dict_from_text(self,",
"29) assert iso_format == \"2020-04-29T00:00:00+09:00\" def test_get_ad_date_iso_fmt(self, tmu_object): iso_format = tmu_object.get_ad_date_iso_fmt(4, 3) assert",
"0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\":",
"\"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]), (\"NO_start_needDay\", datetime.datetime(",
"with pytest.raises(ValueError): tmu_object.convert_wareki_to_ad('大正2年10月23日') @pytest.mark.parametrize( \"pattern, end, start, need_day, expected\", [ (\"No_start_No_needDay\", datetime.datetime(2020, 3,",
"iso_format = tmu_object.get_ad_dt_fmt('令和', 2, 4, 29) assert iso_format == \"2020-04-29T00:00:00+09:00\" def test_get_ad_date_iso_fmt(self, tmu_object):",
"tmu_object): iso_format = tmu_object.get_ad_date_iso_fmt(4, 3) assert iso_format == \"2020-04-03T00:00:00+09:00\" def test_get_ad_default_year_dt_fmt(self, tmu_object): datetime_format",
"result = tmu_object.get_ymd_int_each('2年3月9日') assert result == [2, 3, 9] def test_get_ymd_int_each_2020(self, tmu_object): result",
"assert iso_format == \"2020-04-29T00:00:00+09:00\" def test_get_ad_date_iso_fmt(self, tmu_object): iso_format = tmu_object.get_ad_date_iso_fmt(4, 3) assert iso_format",
"tmu_object): result = tmu_object.convert_wareki_to_ad('令和2年10月23日') assert result == \"2020-10-23T00:00:00+09:00\" def test_convert_wareki_to_ad_error(self, tmu_object): with pytest.raises(ValueError):",
"[{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_needDay\", datetime.datetime( 2020, 3, 2),",
"0, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST')) def test_convert_wareki_to_ad(self, tmu_object): result = tmu_object.convert_wareki_to_ad('令和2年10月23日') assert result",
"result == [\"test1\", \"test2\"] def test_get_ad_dt_fmt(self, tmu_object): iso_format = tmu_object.get_ad_dt_fmt('令和', 2, 4, 29)",
"\"小計\": 0}]), (\"start_No_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0,",
"\"day\": 2}]), (\"NO_start_needDay\", datetime.datetime( 2020, 3, 2), None, True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1,",
"\"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]) ] ) def test_create_dt_dict(self, tmu_object, pattern, end, start,",
"\"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]) ] ) def test_create_dt_dict(self, tmu_object,",
"[3, 1] def test_parse_date_span(self, tmu_object): target_char = \"test1~ \\ntest2\" result = tmu_object.parse_date_span(target_char) assert",
"def test_get_ad_date_iso_fmt(self, tmu_object): iso_format = tmu_object.get_ad_date_iso_fmt(4, 3) assert iso_format == \"2020-04-03T00:00:00+09:00\" def test_get_ad_default_year_dt_fmt(self,",
"\"2020-10-23T00:00:00+09:00\" def test_convert_wareki_to_ad_error(self, tmu_object): with pytest.raises(ValueError): tmu_object.convert_wareki_to_ad('大正2年10月23日') @pytest.mark.parametrize( \"pattern, end, start, need_day, expected\",",
"32400), 'JST')) def test_convert_wareki_to_ad(self, tmu_object): result = tmu_object.convert_wareki_to_ad('令和2年10月23日') assert result == \"2020-10-23T00:00:00+09:00\" def",
"need_day, expected\", [ (\"No_start_No_needDay\", datetime.datetime(2020, 3, 2), None, False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0},",
"name='JST')), False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_needDay\", datetime.datetime( 2020,",
"{\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1, 0,",
"2}]) ] ) def test_create_dt_dict(self, tmu_object, pattern, end, start, need_day, expected): print(pattern) result",
"[2, 3, 9] def test_get_ymd_int_each_2020(self, tmu_object): result = tmu_object.get_ymd_int_each('3月1日', need_year=False) assert result ==",
"9] def test_get_ymd_int_each_2020(self, tmu_object): result = tmu_object.get_ymd_int_each('3月1日', need_year=False) assert result == [3, 1]",
"\"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020,",
"test_parse_date_span(self, tmu_object): target_char = \"test1~ \\ntest2\" result = tmu_object.parse_date_span(target_char) assert result == [\"test1\",",
"True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]),",
"2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), False, [{\"日付\":",
"4, 29) assert iso_format == \"2020-04-29T00:00:00+09:00\" def test_get_ad_date_iso_fmt(self, tmu_object): iso_format = tmu_object.get_ad_date_iso_fmt(4, 3)",
"\"小計\": 0, \"day\": 2}]) ] ) def test_create_dt_dict(self, tmu_object, pattern, end, start, need_day,",
"datetime @pytest.fixture(scope=\"module\", autouse=True) def tmu_object(): tmu = TimeUtil() yield tmu class TestTimeUtil: @pytest.mark.parametrize(\"test_input,",
"= tmu_object.get_ad_dt_fmt('令和', 2, 4, 29) assert iso_format == \"2020-04-29T00:00:00+09:00\" def test_get_ad_date_iso_fmt(self, tmu_object): iso_format",
"need_day=need_day) assert result == expected def test_get_dt_dict_from_text(self, tmu_object): target_char = \"3月1日~ \\n3月2日\" result",
"TimeUtil() yield tmu class TestTimeUtil: @pytest.mark.parametrize(\"test_input, expected_wareki, expected_other\", [( '令和2年10月31日', '令和', '2年10月31日' ),",
"tmu_object.get_dt_dict_from_text(target_char) assert result == [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\":",
"@pytest.mark.parametrize( \"pattern, end, start, need_day, expected\", [ (\"No_start_No_needDay\", datetime.datetime(2020, 3, 2), None, False,",
"1, 0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1,",
"iso_format == \"2020-04-03T00:00:00+09:00\" def test_get_ad_default_year_dt_fmt(self, tmu_object): datetime_format = tmu_object.get_ad_default_year_dt_fmt(4, 3) assert datetime_format ==",
"def test_get_ymd_int_each(self, tmu_object): result = tmu_object.get_ymd_int_each('2年3月9日') assert result == [2, 3, 9] def",
"tmu_object.convert_wareki_to_ad('大正2年10月23日') @pytest.mark.parametrize( \"pattern, end, start, need_day, expected\", [ (\"No_start_No_needDay\", datetime.datetime(2020, 3, 2), None,",
"def test_get_dt_dict_from_text(self, tmu_object): target_char = \"3月1日~ \\n3月2日\" result = tmu_object.get_dt_dict_from_text(target_char) assert result ==",
"target_char = \"test1~ \\ntest2\" result = tmu_object.parse_date_span(target_char) assert result == [\"test1\", \"test2\"] def",
"expected_other\", [( '令和2年10月31日', '令和', '2年10月31日' ), ('平成30年1月31日', '平成', '30年1月31日'), ('大正1年1月31日', None, None)]) def",
"autouse=True) def tmu_object(): tmu = TimeUtil() yield tmu class TestTimeUtil: @pytest.mark.parametrize(\"test_input, expected_wareki, expected_other\",",
"iso_format == \"2020-04-29T00:00:00+09:00\" def test_get_ad_date_iso_fmt(self, tmu_object): iso_format = tmu_object.get_ad_date_iso_fmt(4, 3) assert iso_format ==",
"tmu_object.get_wareki(test_input) assert wareki == expected_wareki assert other == expected_other def test_get_ymd_int_each(self, tmu_object): result",
"from src.lib.time_util import TimeUtil import datetime @pytest.fixture(scope=\"module\", autouse=True) def tmu_object(): tmu = TimeUtil()",
"@pytest.fixture(scope=\"module\", autouse=True) def tmu_object(): tmu = TimeUtil() yield tmu class TestTimeUtil: @pytest.mark.parametrize(\"test_input, expected_wareki,",
"tmu_object.get_ymd_int_each('3月1日', need_year=False) assert result == [3, 1] def test_parse_date_span(self, tmu_object): target_char = \"test1~",
"3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), True,",
"0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\":",
"{\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]) ] ) def test_create_dt_dict(self, tmu_object, pattern, end,",
"3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), False,",
"expected_wareki, expected_other\", [( '令和2年10月31日', '令和', '2年10月31日' ), ('平成30年1月31日', '平成', '30年1月31日'), ('大正1年1月31日', None, None)])",
"result = tmu_object.convert_wareki_to_ad('令和2年10月23日') assert result == \"2020-10-23T00:00:00+09:00\" def test_convert_wareki_to_ad_error(self, tmu_object): with pytest.raises(ValueError): tmu_object.convert_wareki_to_ad('大正2年10月23日')",
"\"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0,",
"3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\":",
"def tmu_object(): tmu = TimeUtil() yield tmu class TestTimeUtil: @pytest.mark.parametrize(\"test_input, expected_wareki, expected_other\", [(",
"[{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]), (\"NO_start_needDay\",",
"0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\":",
"\"小計\": 0}]), (\"start_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0,",
"1] def test_parse_date_span(self, tmu_object): target_char = \"test1~ \\ntest2\" result = tmu_object.parse_date_span(target_char) assert result",
"('大正1年1月31日', None, None)]) def test_get_wareki(self, tmu_object, test_input, expected_wareki, expected_other): wareki, other = tmu_object.get_wareki(test_input)",
"test_get_ad_date_iso_fmt(self, tmu_object): iso_format = tmu_object.get_ad_date_iso_fmt(4, 3) assert iso_format == \"2020-04-03T00:00:00+09:00\" def test_get_ad_default_year_dt_fmt(self, tmu_object):",
"test_convert_wareki_to_ad_error(self, tmu_object): with pytest.raises(ValueError): tmu_object.convert_wareki_to_ad('大正2年10月23日') @pytest.mark.parametrize( \"pattern, end, start, need_day, expected\", [ (\"No_start_No_needDay\",",
"[ (\"No_start_No_needDay\", datetime.datetime(2020, 3, 2), None, False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\",",
"False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_No_needDay\", datetime.datetime( 2020, 3,",
"{\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]), (\"NO_start_needDay\", datetime.datetime( 2020, 3, 2), None, True,",
"\"小計\": 0, \"day\": 2}]), (\"NO_start_needDay\", datetime.datetime( 2020, 3, 2), None, True, [{\"日付\": \"2020-03-01T00:00:00+09:00\",",
"assert result == [3, 1] def test_parse_date_span(self, tmu_object): target_char = \"test1~ \\ntest2\" result",
"= tmu_object.create_dt_dict( end, start=start, need_day=need_day) assert result == expected def test_get_dt_dict_from_text(self, tmu_object): target_char",
"[{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}] if",
"'令和', '2年10月31日' ), ('平成30年1月31日', '平成', '30年1月31日'), ('大正1年1月31日', None, None)]) def test_get_wareki(self, tmu_object, test_input,",
"other == expected_other def test_get_ymd_int_each(self, tmu_object): result = tmu_object.get_ymd_int_each('2年3月9日') assert result == [2,",
"start, need_day, expected\", [ (\"No_start_No_needDay\", datetime.datetime(2020, 3, 2), None, False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\":",
"3) assert iso_format == \"2020-04-03T00:00:00+09:00\" def test_get_ad_default_year_dt_fmt(self, tmu_object): datetime_format = tmu_object.get_ad_default_year_dt_fmt(4, 3) assert",
"== [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]",
"expected_other): wareki, other = tmu_object.get_wareki(test_input) assert wareki == expected_wareki assert other == expected_other",
"tmu_object.get_ymd_int_each('2年3月9日') assert result == [2, 3, 9] def test_get_ymd_int_each_2020(self, tmu_object): result = tmu_object.get_ymd_int_each('3月1日',",
"tmu = TimeUtil() yield tmu class TestTimeUtil: @pytest.mark.parametrize(\"test_input, expected_wareki, expected_other\", [( '令和2年10月31日', '令和',",
"def test_convert_wareki_to_ad(self, tmu_object): result = tmu_object.convert_wareki_to_ad('令和2年10月23日') assert result == \"2020-10-23T00:00:00+09:00\" def test_convert_wareki_to_ad_error(self, tmu_object):",
"expected): print(pattern) result = tmu_object.create_dt_dict( end, start=start, need_day=need_day) assert result == expected def",
"assert result == expected def test_get_dt_dict_from_text(self, tmu_object): target_char = \"3月1日~ \\n3月2日\" result =",
"tmu_object): target_char = \"test1~ \\ntest2\" result = tmu_object.parse_date_span(target_char) assert result == [\"test1\", \"test2\"]",
"tmu_object): datetime_format = tmu_object.get_ad_default_year_dt_fmt(4, 3) assert datetime_format == datetime.datetime( 2020, 4, 3, 0,",
"1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}] if __name__ == '__main__':",
"\"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3,",
"3, 9] def test_get_ymd_int_each_2020(self, tmu_object): result = tmu_object.get_ymd_int_each('3月1日', need_year=False) assert result == [3,",
"= tmu_object.get_wareki(test_input) assert wareki == expected_wareki assert other == expected_other def test_get_ymd_int_each(self, tmu_object):",
"result = tmu_object.get_ymd_int_each('3月1日', need_year=False) assert result == [3, 1] def test_parse_date_span(self, tmu_object): target_char",
"expected_other def test_get_ymd_int_each(self, tmu_object): result = tmu_object.get_ymd_int_each('2年3月9日') assert result == [2, 3, 9]",
"\"test2\"] def test_get_ad_dt_fmt(self, tmu_object): iso_format = tmu_object.get_ad_dt_fmt('令和', 2, 4, 29) assert iso_format ==",
"test_get_ad_dt_fmt(self, tmu_object): iso_format = tmu_object.get_ad_dt_fmt('令和', 2, 4, 29) assert iso_format == \"2020-04-29T00:00:00+09:00\" def",
"None, True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\":",
"4, 3, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST')) def test_convert_wareki_to_ad(self, tmu_object): result = tmu_object.convert_wareki_to_ad('令和2年10月23日')",
"1, 0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0},",
"1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]) ] ) def test_create_dt_dict(self,",
"name='JST')), True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\":",
"def test_get_wareki(self, tmu_object, test_input, expected_wareki, expected_other): wareki, other = tmu_object.get_wareki(test_input) assert wareki ==",
"tmu_object.get_ad_date_iso_fmt(4, 3) assert iso_format == \"2020-04-03T00:00:00+09:00\" def test_get_ad_default_year_dt_fmt(self, tmu_object): datetime_format = tmu_object.get_ad_default_year_dt_fmt(4, 3)",
"0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_No_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1,",
"test_get_wareki(self, tmu_object, test_input, expected_wareki, expected_other): wareki, other = tmu_object.get_wareki(test_input) assert wareki == expected_wareki",
"yield tmu class TestTimeUtil: @pytest.mark.parametrize(\"test_input, expected_wareki, expected_other\", [( '令和2年10月31日', '令和', '2年10月31日' ), ('平成30年1月31日',",
"datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), True, [{\"日付\": \"2020-03-01T00:00:00+09:00\",",
"result == [2, 3, 9] def test_get_ymd_int_each_2020(self, tmu_object): result = tmu_object.get_ymd_int_each('3月1日', need_year=False) assert",
"'JST')) def test_convert_wareki_to_ad(self, tmu_object): result = tmu_object.convert_wareki_to_ad('令和2年10月23日') assert result == \"2020-10-23T00:00:00+09:00\" def test_convert_wareki_to_ad_error(self,",
"1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]), (\"NO_start_needDay\", datetime.datetime( 2020, 3,",
"2), None, True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0,",
"test_create_dt_dict(self, tmu_object, pattern, end, start, need_day, expected): print(pattern) result = tmu_object.create_dt_dict( end, start=start,",
"0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}] if __name__ == '__main__': pytest.main(['-v', __file__])",
"result == [3, 1] def test_parse_date_span(self, tmu_object): target_char = \"test1~ \\ntest2\" result =",
"'30年1月31日'), ('大正1年1月31日', None, None)]) def test_get_wareki(self, tmu_object, test_input, expected_wareki, expected_other): wareki, other =",
"def test_parse_date_span(self, tmu_object): target_char = \"test1~ \\ntest2\" result = tmu_object.parse_date_span(target_char) assert result ==",
"assert iso_format == \"2020-04-03T00:00:00+09:00\" def test_get_ad_default_year_dt_fmt(self, tmu_object): datetime_format = tmu_object.get_ad_default_year_dt_fmt(4, 3) assert datetime_format",
"test_get_ad_default_year_dt_fmt(self, tmu_object): datetime_format = tmu_object.get_ad_default_year_dt_fmt(4, 3) assert datetime_format == datetime.datetime( 2020, 4, 3,",
") def test_create_dt_dict(self, tmu_object, pattern, end, start, need_day, expected): print(pattern) result = tmu_object.create_dt_dict(",
"3) assert datetime_format == datetime.datetime( 2020, 4, 3, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST'))",
"= tmu_object.get_ad_date_iso_fmt(4, 3) assert iso_format == \"2020-04-03T00:00:00+09:00\" def test_get_ad_default_year_dt_fmt(self, tmu_object): datetime_format = tmu_object.get_ad_default_year_dt_fmt(4,",
"= tmu_object.parse_date_span(target_char) assert result == [\"test1\", \"test2\"] def test_get_ad_dt_fmt(self, tmu_object): iso_format = tmu_object.get_ad_dt_fmt('令和',",
"assert result == [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0,",
"0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\",",
"datetime.datetime( 2020, 4, 3, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST')) def test_convert_wareki_to_ad(self, tmu_object): result",
"\"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_No_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020,",
"datetime_format == datetime.datetime( 2020, 4, 3, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST')) def test_convert_wareki_to_ad(self,",
"assert wareki == expected_wareki assert other == expected_other def test_get_ymd_int_each(self, tmu_object): result =",
"tmu_object, test_input, expected_wareki, expected_other): wareki, other = tmu_object.get_wareki(test_input) assert wareki == expected_wareki assert",
"wareki == expected_wareki assert other == expected_other def test_get_ymd_int_each(self, tmu_object): result = tmu_object.get_ymd_int_each('2年3月9日')",
"tmu_object.parse_date_span(target_char) assert result == [\"test1\", \"test2\"] def test_get_ad_dt_fmt(self, tmu_object): iso_format = tmu_object.get_ad_dt_fmt('令和', 2,",
"2, 4, 29) assert iso_format == \"2020-04-29T00:00:00+09:00\" def test_get_ad_date_iso_fmt(self, tmu_object): iso_format = tmu_object.get_ad_date_iso_fmt(4,",
"== datetime.datetime( 2020, 4, 3, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST')) def test_convert_wareki_to_ad(self, tmu_object):",
"import TimeUtil import datetime @pytest.fixture(scope=\"module\", autouse=True) def tmu_object(): tmu = TimeUtil() yield tmu",
"test_get_ymd_int_each_2020(self, tmu_object): result = tmu_object.get_ymd_int_each('3月1日', need_year=False) assert result == [3, 1] def test_parse_date_span(self,",
"expected def test_get_dt_dict_from_text(self, tmu_object): target_char = \"3月1日~ \\n3月2日\" result = tmu_object.get_dt_dict_from_text(target_char) assert result",
"@pytest.mark.parametrize(\"test_input, expected_wareki, expected_other\", [( '令和2年10月31日', '令和', '2年10月31日' ), ('平成30年1月31日', '平成', '30年1月31日'), ('大正1年1月31日', None,",
"TestTimeUtil: @pytest.mark.parametrize(\"test_input, expected_wareki, expected_other\", [( '令和2年10月31日', '令和', '2年10月31日' ), ('平成30年1月31日', '平成', '30年1月31日'), ('大正1年1月31日',",
"(\"start_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone(",
"result = tmu_object.parse_date_span(target_char) assert result == [\"test1\", \"test2\"] def test_get_ad_dt_fmt(self, tmu_object): iso_format =",
"= tmu_object.get_ymd_int_each('2年3月9日') assert result == [2, 3, 9] def test_get_ymd_int_each_2020(self, tmu_object): result =",
"2), None, False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_No_needDay\", datetime.datetime(",
"3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\":",
"\"3月1日~ \\n3月2日\" result = tmu_object.get_dt_dict_from_text(target_char) assert result == [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\":",
"\"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]), (\"NO_start_needDay\", datetime.datetime( 2020, 3, 2),",
"target_char = \"3月1日~ \\n3月2日\" result = tmu_object.get_dt_dict_from_text(target_char) assert result == [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\":",
"0}]), (\"start_No_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0,",
"[{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_No_needDay\", datetime.datetime( 2020, 3, 2),",
"pattern, end, start, need_day, expected): print(pattern) result = tmu_object.create_dt_dict( end, start=start, need_day=need_day) assert",
"2020, 3, 2), None, True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\",",
"def test_get_ad_default_year_dt_fmt(self, tmu_object): datetime_format = tmu_object.get_ad_default_year_dt_fmt(4, 3) assert datetime_format == datetime.datetime( 2020, 4,",
"None, False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_No_needDay\", datetime.datetime( 2020,",
"tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_needDay\",",
"0, 0, tzinfo=datetime.timezone( datetime.timedelta(hours=9), name='JST')), False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\":",
"\\n3月2日\" result = tmu_object.get_dt_dict_from_text(target_char) assert result == [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0},",
"tmu_object.convert_wareki_to_ad('令和2年10月23日') assert result == \"2020-10-23T00:00:00+09:00\" def test_convert_wareki_to_ad_error(self, tmu_object): with pytest.raises(ValueError): tmu_object.convert_wareki_to_ad('大正2年10月23日') @pytest.mark.parametrize( \"pattern,",
"None)]) def test_get_wareki(self, tmu_object, test_input, expected_wareki, expected_other): wareki, other = tmu_object.get_wareki(test_input) assert wareki",
"= TimeUtil() yield tmu class TestTimeUtil: @pytest.mark.parametrize(\"test_input, expected_wareki, expected_other\", [( '令和2年10月31日', '令和', '2年10月31日'",
"== \"2020-10-23T00:00:00+09:00\" def test_convert_wareki_to_ad_error(self, tmu_object): with pytest.raises(ValueError): tmu_object.convert_wareki_to_ad('大正2年10月23日') @pytest.mark.parametrize( \"pattern, end, start, need_day,",
"\"2020-04-03T00:00:00+09:00\" def test_get_ad_default_year_dt_fmt(self, tmu_object): datetime_format = tmu_object.get_ad_default_year_dt_fmt(4, 3) assert datetime_format == datetime.datetime( 2020,",
"datetime.timedelta(hours=9), name='JST')), False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_needDay\", datetime.datetime(",
"src.lib.time_util import TimeUtil import datetime @pytest.fixture(scope=\"module\", autouse=True) def tmu_object(): tmu = TimeUtil() yield",
"import datetime @pytest.fixture(scope=\"module\", autouse=True) def tmu_object(): tmu = TimeUtil() yield tmu class TestTimeUtil:",
"\"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_No_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3,",
"('平成30年1月31日', '平成', '30年1月31日'), ('大正1年1月31日', None, None)]) def test_get_wareki(self, tmu_object, test_input, expected_wareki, expected_other): wareki,",
"3, 2), None, False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_No_needDay\",",
"== [\"test1\", \"test2\"] def test_get_ad_dt_fmt(self, tmu_object): iso_format = tmu_object.get_ad_dt_fmt('令和', 2, 4, 29) assert",
"3, 2), None, True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\":",
"2020, 4, 3, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST')) def test_convert_wareki_to_ad(self, tmu_object): result =",
"wareki, other = tmu_object.get_wareki(test_input) assert wareki == expected_wareki assert other == expected_other def",
"), ('平成30年1月31日', '平成', '30年1月31日'), ('大正1年1月31日', None, None)]) def test_get_wareki(self, tmu_object, test_input, expected_wareki, expected_other):",
"tmu_object): iso_format = tmu_object.get_ad_dt_fmt('令和', 2, 4, 29) assert iso_format == \"2020-04-29T00:00:00+09:00\" def test_get_ad_date_iso_fmt(self,",
"tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST')) def test_convert_wareki_to_ad(self, tmu_object): result = tmu_object.convert_wareki_to_ad('令和2年10月23日') assert result == \"2020-10-23T00:00:00+09:00\"",
"expected_wareki assert other == expected_other def test_get_ymd_int_each(self, tmu_object): result = tmu_object.get_ymd_int_each('2年3月9日') assert result",
"\"pattern, end, start, need_day, expected\", [ (\"No_start_No_needDay\", datetime.datetime(2020, 3, 2), None, False, [{\"日付\":",
"tmu_object(): tmu = TimeUtil() yield tmu class TestTimeUtil: @pytest.mark.parametrize(\"test_input, expected_wareki, expected_other\", [( '令和2年10月31日',",
"= tmu_object.get_ymd_int_each('3月1日', need_year=False) assert result == [3, 1] def test_parse_date_span(self, tmu_object): target_char =",
"\"2020-04-29T00:00:00+09:00\" def test_get_ad_date_iso_fmt(self, tmu_object): iso_format = tmu_object.get_ad_date_iso_fmt(4, 3) assert iso_format == \"2020-04-03T00:00:00+09:00\" def",
"(\"NO_start_needDay\", datetime.datetime( 2020, 3, 2), None, True, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"day\": 1, \"小計\": 0},",
"= \"test1~ \\ntest2\" result = tmu_object.parse_date_span(target_char) assert result == [\"test1\", \"test2\"] def test_get_ad_dt_fmt(self,",
"datetime_format = tmu_object.get_ad_default_year_dt_fmt(4, 3) assert datetime_format == datetime.datetime( 2020, 4, 3, 0, 0,",
"0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1,",
"{\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0}]), (\"start_No_needDay\", datetime.datetime( 2020, 3, 2), datetime.datetime(2020, 3, 1, 0,",
"== [3, 1] def test_parse_date_span(self, tmu_object): target_char = \"test1~ \\ntest2\" result = tmu_object.parse_date_span(target_char)",
"TimeUtil import datetime @pytest.fixture(scope=\"module\", autouse=True) def tmu_object(): tmu = TimeUtil() yield tmu class",
"None, None)]) def test_get_wareki(self, tmu_object, test_input, expected_wareki, expected_other): wareki, other = tmu_object.get_wareki(test_input) assert",
"def test_get_ymd_int_each_2020(self, tmu_object): result = tmu_object.get_ymd_int_each('3月1日', need_year=False) assert result == [3, 1] def",
"0}, {\"日付\": \"2020-03-02T00:00:00+09:00\", \"小計\": 0, \"day\": 2}]) ] ) def test_create_dt_dict(self, tmu_object, pattern,",
"tmu_object, pattern, end, start, need_day, expected): print(pattern) result = tmu_object.create_dt_dict( end, start=start, need_day=need_day)",
"iso_format = tmu_object.get_ad_date_iso_fmt(4, 3) assert iso_format == \"2020-04-03T00:00:00+09:00\" def test_get_ad_default_year_dt_fmt(self, tmu_object): datetime_format =",
"expected\", [ (\"No_start_No_needDay\", datetime.datetime(2020, 3, 2), None, False, [{\"日付\": \"2020-03-01T00:00:00+09:00\", \"小計\": 0}, {\"日付\":"
] |
[
"trade in self.Trades: trade.Gammas(SCENARIO,RISK) def Vegas(self,SCENARIO,RISK): for trade in self.Trades: trade.Vegas(SCENARIO,RISK) def Rhos(self,SCENARIO,RISK):",
"def NPV(self,SCENARIO): dblTotal = 0.0 for trade in self.Trades: dblTotal += trade.NPV(SCENARIO) return",
"def Deltas(self,SCENARIO,RISK): for trade in self.Trades: trade.Deltas(SCENARIO,RISK) def Gammas(self,SCENARIO,RISK): for trade in self.Trades:",
"items that are the trades in the portfolio # run-time interpretation will handle",
"self.Trades = [] self.Cash = 0.0 def Append(self,X): self.Trades.append(X) def TradeCount(self): return len(self.Trades)",
"me Trades = [] Cash = 0.0 Name = '' def __init__(self,name=''): self.Name",
"0.0 for trade in self.Trades: dblTotal += trade.NPV(SCENARIO) return dblTotal def Deltas(self,SCENARIO,RISK): for",
"Trades = [] Cash = 0.0 Name = '' def __init__(self,name=''): self.Name =",
"of items that are the trades in the portfolio # run-time interpretation will",
"NPV(self,SCENARIO): dblTotal = 0.0 for trade in self.Trades: dblTotal += trade.NPV(SCENARIO) return dblTotal",
"array of items that are the trades in the portfolio # run-time interpretation",
"Name = '' def __init__(self,name=''): self.Name = name self.Trades = [] self.Cash =",
"trades in the portfolio # run-time interpretation will handle polymorphism for me Trades",
"self.Trades.append(X) def TradeCount(self): return len(self.Trades) def NPV(self,SCENARIO): dblTotal = 0.0 for trade in",
"the trades in the portfolio # run-time interpretation will handle polymorphism for me",
"in the portfolio # run-time interpretation will handle polymorphism for me Trades =",
"in self.Trades: trade.Deltas(SCENARIO,RISK) def Gammas(self,SCENARIO,RISK): for trade in self.Trades: trade.Gammas(SCENARIO,RISK) def Vegas(self,SCENARIO,RISK): for",
"TradeCount(self): return len(self.Trades) def NPV(self,SCENARIO): dblTotal = 0.0 for trade in self.Trades: dblTotal",
"'' def __init__(self,name=''): self.Name = name self.Trades = [] self.Cash = 0.0 def",
"trade in self.Trades: trade.Deltas(SCENARIO,RISK) def Gammas(self,SCENARIO,RISK): for trade in self.Trades: trade.Gammas(SCENARIO,RISK) def Vegas(self,SCENARIO,RISK):",
"def Gammas(self,SCENARIO,RISK): for trade in self.Trades: trade.Gammas(SCENARIO,RISK) def Vegas(self,SCENARIO,RISK): for trade in self.Trades:",
"name self.Trades = [] self.Cash = 0.0 def Append(self,X): self.Trades.append(X) def TradeCount(self): return",
"# run-time interpretation will handle polymorphism for me Trades = [] Cash =",
"len(self.Trades) def NPV(self,SCENARIO): dblTotal = 0.0 for trade in self.Trades: dblTotal += trade.NPV(SCENARIO)",
"Gammas(self,SCENARIO,RISK): for trade in self.Trades: trade.Gammas(SCENARIO,RISK) def Vegas(self,SCENARIO,RISK): for trade in self.Trades: trade.Vegas(SCENARIO,RISK)",
"# define array of items that are the trades in the portfolio #",
"dblTotal def Deltas(self,SCENARIO,RISK): for trade in self.Trades: trade.Deltas(SCENARIO,RISK) def Gammas(self,SCENARIO,RISK): for trade in",
"+= trade.NPV(SCENARIO) return dblTotal def Deltas(self,SCENARIO,RISK): for trade in self.Trades: trade.Deltas(SCENARIO,RISK) def Gammas(self,SCENARIO,RISK):",
"that are the trades in the portfolio # run-time interpretation will handle polymorphism",
"dblTotal = 0.0 for trade in self.Trades: dblTotal += trade.NPV(SCENARIO) return dblTotal def",
"return len(self.Trades) def NPV(self,SCENARIO): dblTotal = 0.0 for trade in self.Trades: dblTotal +=",
"self.Trades: dblTotal += trade.NPV(SCENARIO) return dblTotal def Deltas(self,SCENARIO,RISK): for trade in self.Trades: trade.Deltas(SCENARIO,RISK)",
"in self.Trades: trade.Gammas(SCENARIO,RISK) def Vegas(self,SCENARIO,RISK): for trade in self.Trades: trade.Vegas(SCENARIO,RISK) def Rhos(self,SCENARIO,RISK): for",
"def TradeCount(self): return len(self.Trades) def NPV(self,SCENARIO): dblTotal = 0.0 for trade in self.Trades:",
"= 0.0 for trade in self.Trades: dblTotal += trade.NPV(SCENARIO) return dblTotal def Deltas(self,SCENARIO,RISK):",
"trade.Deltas(SCENARIO,RISK) def Gammas(self,SCENARIO,RISK): for trade in self.Trades: trade.Gammas(SCENARIO,RISK) def Vegas(self,SCENARIO,RISK): for trade in",
"= '' def __init__(self,name=''): self.Name = name self.Trades = [] self.Cash = 0.0",
"def Vegas(self,SCENARIO,RISK): for trade in self.Trades: trade.Vegas(SCENARIO,RISK) def Rhos(self,SCENARIO,RISK): for trade in self.Trades:",
"for trade in self.Trades: dblTotal += trade.NPV(SCENARIO) return dblTotal def Deltas(self,SCENARIO,RISK): for trade",
"the portfolio # run-time interpretation will handle polymorphism for me Trades = []",
"define array of items that are the trades in the portfolio # run-time",
"for me Trades = [] Cash = 0.0 Name = '' def __init__(self,name=''):",
"Vegas(self,SCENARIO,RISK): for trade in self.Trades: trade.Vegas(SCENARIO,RISK) def Rhos(self,SCENARIO,RISK): for trade in self.Trades: trade.Rhos(SCENARIO,RISK)",
"portfolio # run-time interpretation will handle polymorphism for me Trades = [] Cash",
"def Append(self,X): self.Trades.append(X) def TradeCount(self): return len(self.Trades) def NPV(self,SCENARIO): dblTotal = 0.0 for",
"self.Trades: trade.Gammas(SCENARIO,RISK) def Vegas(self,SCENARIO,RISK): for trade in self.Trades: trade.Vegas(SCENARIO,RISK) def Rhos(self,SCENARIO,RISK): for trade",
"handle polymorphism for me Trades = [] Cash = 0.0 Name = ''",
"= [] Cash = 0.0 Name = '' def __init__(self,name=''): self.Name = name",
"= name self.Trades = [] self.Cash = 0.0 def Append(self,X): self.Trades.append(X) def TradeCount(self):",
"0.0 def Append(self,X): self.Trades.append(X) def TradeCount(self): return len(self.Trades) def NPV(self,SCENARIO): dblTotal = 0.0",
"polymorphism for me Trades = [] Cash = 0.0 Name = '' def",
"__init__(self,name=''): self.Name = name self.Trades = [] self.Cash = 0.0 def Append(self,X): self.Trades.append(X)",
"class Portfolio: # define array of items that are the trades in the",
"[] Cash = 0.0 Name = '' def __init__(self,name=''): self.Name = name self.Trades",
"= 0.0 def Append(self,X): self.Trades.append(X) def TradeCount(self): return len(self.Trades) def NPV(self,SCENARIO): dblTotal =",
"interpretation will handle polymorphism for me Trades = [] Cash = 0.0 Name",
"trade in self.Trades: dblTotal += trade.NPV(SCENARIO) return dblTotal def Deltas(self,SCENARIO,RISK): for trade in",
"in self.Trades: dblTotal += trade.NPV(SCENARIO) return dblTotal def Deltas(self,SCENARIO,RISK): for trade in self.Trades:",
"will handle polymorphism for me Trades = [] Cash = 0.0 Name =",
"dblTotal += trade.NPV(SCENARIO) return dblTotal def Deltas(self,SCENARIO,RISK): for trade in self.Trades: trade.Deltas(SCENARIO,RISK) def",
"0.0 Name = '' def __init__(self,name=''): self.Name = name self.Trades = [] self.Cash",
"for trade in self.Trades: trade.Deltas(SCENARIO,RISK) def Gammas(self,SCENARIO,RISK): for trade in self.Trades: trade.Gammas(SCENARIO,RISK) def",
"return dblTotal def Deltas(self,SCENARIO,RISK): for trade in self.Trades: trade.Deltas(SCENARIO,RISK) def Gammas(self,SCENARIO,RISK): for trade",
"Append(self,X): self.Trades.append(X) def TradeCount(self): return len(self.Trades) def NPV(self,SCENARIO): dblTotal = 0.0 for trade",
"= [] self.Cash = 0.0 def Append(self,X): self.Trades.append(X) def TradeCount(self): return len(self.Trades) def",
"Deltas(self,SCENARIO,RISK): for trade in self.Trades: trade.Deltas(SCENARIO,RISK) def Gammas(self,SCENARIO,RISK): for trade in self.Trades: trade.Gammas(SCENARIO,RISK)",
"for trade in self.Trades: trade.Gammas(SCENARIO,RISK) def Vegas(self,SCENARIO,RISK): for trade in self.Trades: trade.Vegas(SCENARIO,RISK) def",
"Cash = 0.0 Name = '' def __init__(self,name=''): self.Name = name self.Trades =",
"self.Cash = 0.0 def Append(self,X): self.Trades.append(X) def TradeCount(self): return len(self.Trades) def NPV(self,SCENARIO): dblTotal",
"trade.Gammas(SCENARIO,RISK) def Vegas(self,SCENARIO,RISK): for trade in self.Trades: trade.Vegas(SCENARIO,RISK) def Rhos(self,SCENARIO,RISK): for trade in",
"[] self.Cash = 0.0 def Append(self,X): self.Trades.append(X) def TradeCount(self): return len(self.Trades) def NPV(self,SCENARIO):",
"are the trades in the portfolio # run-time interpretation will handle polymorphism for",
"run-time interpretation will handle polymorphism for me Trades = [] Cash = 0.0",
"trade.NPV(SCENARIO) return dblTotal def Deltas(self,SCENARIO,RISK): for trade in self.Trades: trade.Deltas(SCENARIO,RISK) def Gammas(self,SCENARIO,RISK): for",
"Portfolio: # define array of items that are the trades in the portfolio",
"self.Name = name self.Trades = [] self.Cash = 0.0 def Append(self,X): self.Trades.append(X) def",
"= 0.0 Name = '' def __init__(self,name=''): self.Name = name self.Trades = []",
"self.Trades: trade.Deltas(SCENARIO,RISK) def Gammas(self,SCENARIO,RISK): for trade in self.Trades: trade.Gammas(SCENARIO,RISK) def Vegas(self,SCENARIO,RISK): for trade",
"def __init__(self,name=''): self.Name = name self.Trades = [] self.Cash = 0.0 def Append(self,X):"
] |
[
"\"123\" with self.assertRaises(InputErrors) as context: log = starkbank.deposit.log.get(id=log_id) errors = context.exception.errors for error",
"log in logs})) print(\"Number of logs:\", len(logs)) class TestDepositLogInfoGet(TestCase): def test_success(self): logs =",
"starkbank.user = exampleProject class TestDepositLogGet(TestCase): def test_success(self): logs = list(starkbank.deposit.log.query(limit=10)) logs = list(starkbank.deposit.log.query(limit=10,",
"test_success(self): logs = starkbank.deposit.log.query() log_id = next(logs).id logs = starkbank.deposit.log.get(id=log_id) def test_fail_invalid_log(self): log_id",
"def test_fail_invalid_log(self): log_id = \"123\" with self.assertRaises(InputErrors) as context: log = starkbank.deposit.log.get(id=log_id) errors",
"for log in logs})) print(\"Number of logs:\", len(logs)) class TestDepositLogInfoGet(TestCase): def test_success(self): logs",
"test_success(self): logs = list(starkbank.deposit.log.query(limit=10)) logs = list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id for log in logs}, types={log.type",
"logs = list(starkbank.deposit.log.query(limit=10)) logs = list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id for log in logs}, types={log.type for",
"import starkbank from unittest import TestCase, main from starkbank.error import InputErrors from tests.utils.user",
"list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id for log in logs}, types={log.type for log in logs})) print(\"Number of",
"TestDepositLogInfoGet(TestCase): def test_success(self): logs = starkbank.deposit.log.query() log_id = next(logs).id logs = starkbank.deposit.log.get(id=log_id) def",
"log_id = \"123\" with self.assertRaises(InputErrors) as context: log = starkbank.deposit.log.get(id=log_id) errors = context.exception.errors",
"context: log = starkbank.deposit.log.get(id=log_id) errors = context.exception.errors for error in errors: print(error) self.assertEqual('invalidDepositLog',",
"= next(logs).id logs = starkbank.deposit.log.get(id=log_id) def test_fail_invalid_log(self): log_id = \"123\" with self.assertRaises(InputErrors) as",
"def test_success(self): logs = list(starkbank.deposit.log.query(limit=10)) logs = list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id for log in logs},",
"logs}, types={log.type for log in logs})) print(\"Number of logs:\", len(logs)) class TestDepositLogInfoGet(TestCase): def",
"log in logs}, types={log.type for log in logs})) print(\"Number of logs:\", len(logs)) class",
"from unittest import TestCase, main from starkbank.error import InputErrors from tests.utils.user import exampleProject",
"logs = starkbank.deposit.log.get(id=log_id) def test_fail_invalid_log(self): log_id = \"123\" with self.assertRaises(InputErrors) as context: log",
"class TestDepositLogGet(TestCase): def test_success(self): logs = list(starkbank.deposit.log.query(limit=10)) logs = list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id for log",
"for log in logs}, types={log.type for log in logs})) print(\"Number of logs:\", len(logs))",
"exampleProject class TestDepositLogGet(TestCase): def test_success(self): logs = list(starkbank.deposit.log.query(limit=10)) logs = list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id for",
"logs:\", len(logs)) class TestDepositLogInfoGet(TestCase): def test_success(self): logs = starkbank.deposit.log.query() log_id = next(logs).id logs",
"def test_success(self): logs = starkbank.deposit.log.query() log_id = next(logs).id logs = starkbank.deposit.log.get(id=log_id) def test_fail_invalid_log(self):",
"self.assertRaises(InputErrors) as context: log = starkbank.deposit.log.get(id=log_id) errors = context.exception.errors for error in errors:",
"= context.exception.errors for error in errors: print(error) self.assertEqual('invalidDepositLog', error.code) self.assertEqual(1, len(errors)) if __name__",
"= \"123\" with self.assertRaises(InputErrors) as context: log = starkbank.deposit.log.get(id=log_id) errors = context.exception.errors for",
"import InputErrors from tests.utils.user import exampleProject starkbank.user = exampleProject class TestDepositLogGet(TestCase): def test_success(self):",
"logs = starkbank.deposit.log.query() log_id = next(logs).id logs = starkbank.deposit.log.get(id=log_id) def test_fail_invalid_log(self): log_id =",
"log_id = next(logs).id logs = starkbank.deposit.log.get(id=log_id) def test_fail_invalid_log(self): log_id = \"123\" with self.assertRaises(InputErrors)",
"for error in errors: print(error) self.assertEqual('invalidDepositLog', error.code) self.assertEqual(1, len(errors)) if __name__ == '__main__':",
"error in errors: print(error) self.assertEqual('invalidDepositLog', error.code) self.assertEqual(1, len(errors)) if __name__ == '__main__': main()",
"next(logs).id logs = starkbank.deposit.log.get(id=log_id) def test_fail_invalid_log(self): log_id = \"123\" with self.assertRaises(InputErrors) as context:",
"from tests.utils.user import exampleProject starkbank.user = exampleProject class TestDepositLogGet(TestCase): def test_success(self): logs =",
"main from starkbank.error import InputErrors from tests.utils.user import exampleProject starkbank.user = exampleProject class",
"= starkbank.deposit.log.get(id=log_id) errors = context.exception.errors for error in errors: print(error) self.assertEqual('invalidDepositLog', error.code) self.assertEqual(1,",
"errors = context.exception.errors for error in errors: print(error) self.assertEqual('invalidDepositLog', error.code) self.assertEqual(1, len(errors)) if",
"context.exception.errors for error in errors: print(error) self.assertEqual('invalidDepositLog', error.code) self.assertEqual(1, len(errors)) if __name__ ==",
"logs})) print(\"Number of logs:\", len(logs)) class TestDepositLogInfoGet(TestCase): def test_success(self): logs = starkbank.deposit.log.query() log_id",
"from starkbank.error import InputErrors from tests.utils.user import exampleProject starkbank.user = exampleProject class TestDepositLogGet(TestCase):",
"print(\"Number of logs:\", len(logs)) class TestDepositLogInfoGet(TestCase): def test_success(self): logs = starkbank.deposit.log.query() log_id =",
"= starkbank.deposit.log.query() log_id = next(logs).id logs = starkbank.deposit.log.get(id=log_id) def test_fail_invalid_log(self): log_id = \"123\"",
"len(logs)) class TestDepositLogInfoGet(TestCase): def test_success(self): logs = starkbank.deposit.log.query() log_id = next(logs).id logs =",
"of logs:\", len(logs)) class TestDepositLogInfoGet(TestCase): def test_success(self): logs = starkbank.deposit.log.query() log_id = next(logs).id",
"starkbank.deposit.log.query() log_id = next(logs).id logs = starkbank.deposit.log.get(id=log_id) def test_fail_invalid_log(self): log_id = \"123\" with",
"as context: log = starkbank.deposit.log.get(id=log_id) errors = context.exception.errors for error in errors: print(error)",
"exampleProject starkbank.user = exampleProject class TestDepositLogGet(TestCase): def test_success(self): logs = list(starkbank.deposit.log.query(limit=10)) logs =",
"starkbank from unittest import TestCase, main from starkbank.error import InputErrors from tests.utils.user import",
"<reponame>starkbank/sdk-python<filename>tests/api/testDepositLog.py import starkbank from unittest import TestCase, main from starkbank.error import InputErrors from",
"import TestCase, main from starkbank.error import InputErrors from tests.utils.user import exampleProject starkbank.user =",
"= list(starkbank.deposit.log.query(limit=10)) logs = list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id for log in logs}, types={log.type for log",
"InputErrors from tests.utils.user import exampleProject starkbank.user = exampleProject class TestDepositLogGet(TestCase): def test_success(self): logs",
"unittest import TestCase, main from starkbank.error import InputErrors from tests.utils.user import exampleProject starkbank.user",
"= list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id for log in logs}, types={log.type for log in logs})) print(\"Number",
"TestCase, main from starkbank.error import InputErrors from tests.utils.user import exampleProject starkbank.user = exampleProject",
"logs = list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id for log in logs}, types={log.type for log in logs}))",
"class TestDepositLogInfoGet(TestCase): def test_success(self): logs = starkbank.deposit.log.query() log_id = next(logs).id logs = starkbank.deposit.log.get(id=log_id)",
"with self.assertRaises(InputErrors) as context: log = starkbank.deposit.log.get(id=log_id) errors = context.exception.errors for error in",
"= starkbank.deposit.log.get(id=log_id) def test_fail_invalid_log(self): log_id = \"123\" with self.assertRaises(InputErrors) as context: log =",
"types={log.type for log in logs})) print(\"Number of logs:\", len(logs)) class TestDepositLogInfoGet(TestCase): def test_success(self):",
"in logs})) print(\"Number of logs:\", len(logs)) class TestDepositLogInfoGet(TestCase): def test_success(self): logs = starkbank.deposit.log.query()",
"in logs}, types={log.type for log in logs})) print(\"Number of logs:\", len(logs)) class TestDepositLogInfoGet(TestCase):",
"test_fail_invalid_log(self): log_id = \"123\" with self.assertRaises(InputErrors) as context: log = starkbank.deposit.log.get(id=log_id) errors =",
"log = starkbank.deposit.log.get(id=log_id) errors = context.exception.errors for error in errors: print(error) self.assertEqual('invalidDepositLog', error.code)",
"tests.utils.user import exampleProject starkbank.user = exampleProject class TestDepositLogGet(TestCase): def test_success(self): logs = list(starkbank.deposit.log.query(limit=10))",
"starkbank.deposit.log.get(id=log_id) errors = context.exception.errors for error in errors: print(error) self.assertEqual('invalidDepositLog', error.code) self.assertEqual(1, len(errors))",
"TestDepositLogGet(TestCase): def test_success(self): logs = list(starkbank.deposit.log.query(limit=10)) logs = list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id for log in",
"starkbank.error import InputErrors from tests.utils.user import exampleProject starkbank.user = exampleProject class TestDepositLogGet(TestCase): def",
"= exampleProject class TestDepositLogGet(TestCase): def test_success(self): logs = list(starkbank.deposit.log.query(limit=10)) logs = list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id",
"starkbank.deposit.log.get(id=log_id) def test_fail_invalid_log(self): log_id = \"123\" with self.assertRaises(InputErrors) as context: log = starkbank.deposit.log.get(id=log_id)",
"deposit_ids={log.deposit.id for log in logs}, types={log.type for log in logs})) print(\"Number of logs:\",",
"list(starkbank.deposit.log.query(limit=10)) logs = list(starkbank.deposit.log.query(limit=10, deposit_ids={log.deposit.id for log in logs}, types={log.type for log in",
"import exampleProject starkbank.user = exampleProject class TestDepositLogGet(TestCase): def test_success(self): logs = list(starkbank.deposit.log.query(limit=10)) logs"
] |
[
"main_cli(): parser = argparse.ArgumentParser() parser.add_argument( \"--input\", type=str, required=True, help=\"The PDF file in which",
"\"\"\" import zlib import argparse import pikepdf from pikepdf import Pdf, PdfImage, Name",
"page in pdf_main.pages: page.add_underlay(pdf_wm.pages[0]) pdf_main.save(self.pdf_out) def _create_watermark_pdf(self): c = canvas.Canvas(self.pdf_watermark) pdfmetrics.registerFont( TTFont('times new",
"for page in pdf_main.pages: page.add_underlay(pdf_wm.pages[0]) pdf_main.save(self.pdf_out) def _create_watermark_pdf(self): c = canvas.Canvas(self.pdf_watermark) pdfmetrics.registerFont( TTFont('times",
"in which will be saved result\" ) parser.add_argument( \"--text\", type=str, required=True, help=\"The text",
"Pdf, PdfImage, Name from reportlab.pdfgen import canvas from reportlab.pdfbase import pdfmetrics from reportlab.pdfbase.ttfonts",
"roman', 'Times New Roman.ttf')) c.setFont('times new roman', self.wm_font_size) pw, ph = c._pagesize c.setFillGray(0.5,",
"import pdfmetrics from reportlab.pdfbase.ttfonts import TTFont from reportlab.lib import units class PdfWatermark: def",
"400, self.wm_text) c.restoreState() c.save() def main_cli(): parser = argparse.ArgumentParser() parser.add_argument( \"--input\", type=str, required=True,",
"pikepdf.open(self.pdf_in) as pdf_main: with pikepdf.open(self.pdf_watermark) as pdf_wm: for page in pdf_main.pages: page.add_underlay(pdf_wm.pages[0]) pdf_main.save(self.pdf_out)",
"python3 \"\"\" sudo apt-get install libqpdf-dev \"\"\" import zlib import argparse import pikepdf",
"apt-get install libqpdf-dev \"\"\" import zlib import argparse import pikepdf from pikepdf import",
"#!/usr/bin/env python3 \"\"\" sudo apt-get install libqpdf-dev \"\"\" import zlib import argparse import",
"c = canvas.Canvas(self.pdf_watermark) pdfmetrics.registerFont( TTFont('times new roman', 'Times New Roman.ttf')) c.setFont('times new roman',",
"canvas from reportlab.pdfbase import pdfmetrics from reportlab.pdfbase.ttfonts import TTFont from reportlab.lib import units",
"pdf_in self.pdf_out = pdf_out self.pdf_watermark = \"wm.pdf\" self.wm_font_size = 20 self.wm_text = text",
"self.wm_text) c.restoreState() c.save() def main_cli(): parser = argparse.ArgumentParser() parser.add_argument( \"--input\", type=str, required=True, help=\"The",
"= pdf_out self.pdf_watermark = \"wm.pdf\" self.wm_font_size = 20 self.wm_text = text self.wm_alpha =",
"inserted watermark\" ) parser.add_argument( \"--out\", type=str, required=True, help=\"The PDF file in which will",
"from reportlab.pdfgen import canvas from reportlab.pdfbase import pdfmetrics from reportlab.pdfbase.ttfonts import TTFont from",
"pdfmetrics.registerFont( TTFont('times new roman', 'Times New Roman.ttf')) c.setFont('times new roman', self.wm_font_size) pw, ph",
"will be saved result\" ) parser.add_argument( \"--text\", type=str, required=True, help=\"The text of watermark\"",
"as pdf_main: with pikepdf.open(self.pdf_watermark) as pdf_wm: for page in pdf_main.pages: page.add_underlay(pdf_wm.pages[0]) pdf_main.save(self.pdf_out) def",
"Roman.ttf')) c.setFont('times new roman', self.wm_font_size) pw, ph = c._pagesize c.setFillGray(0.5, self.wm_alpha) c.saveState() c.translate(500,",
"result\" ) parser.add_argument( \"--text\", type=str, required=True, help=\"The text of watermark\" ) args =",
"watermark\" ) parser.add_argument( \"--out\", type=str, required=True, help=\"The PDF file in which will be",
"c.save() def main_cli(): parser = argparse.ArgumentParser() parser.add_argument( \"--input\", type=str, required=True, help=\"The PDF file",
") args = parser.parse_args() srv = PdfWatermark(args.input, args.out, args.text) srv.apply() if __name__ ==",
"50, ph - 400, self.wm_text) c.restoreState() c.save() def main_cli(): parser = argparse.ArgumentParser() parser.add_argument(",
") parser.add_argument( \"--text\", type=str, required=True, help=\"The text of watermark\" ) args = parser.parse_args()",
"import argparse import pikepdf from pikepdf import Pdf, PdfImage, Name from reportlab.pdfgen import",
"import TTFont from reportlab.lib import units class PdfWatermark: def __init__(self, pdf_in: str, pdf_out:",
"units class PdfWatermark: def __init__(self, pdf_in: str, pdf_out: str, text: str): self.pdf_in =",
"parser.add_argument( \"--input\", type=str, required=True, help=\"The PDF file in which will be inserted watermark\"",
"c._pagesize c.setFillGray(0.5, self.wm_alpha) c.saveState() c.translate(500, 100) c.rotate(45) c.drawCentredString(pw / 2 - 50, ph",
"'Times New Roman.ttf')) c.setFont('times new roman', self.wm_font_size) pw, ph = c._pagesize c.setFillGray(0.5, self.wm_alpha)",
"str, pdf_out: str, text: str): self.pdf_in = pdf_in self.pdf_out = pdf_out self.pdf_watermark =",
"required=True, help=\"The PDF file in which will be saved result\" ) parser.add_argument( \"--text\",",
"c.setFont('times new roman', self.wm_font_size) pw, ph = c._pagesize c.setFillGray(0.5, self.wm_alpha) c.saveState() c.translate(500, 100)",
"PdfImage, Name from reportlab.pdfgen import canvas from reportlab.pdfbase import pdfmetrics from reportlab.pdfbase.ttfonts import",
"self.wm_text = text self.wm_alpha = 0.2 def apply(self): self._create_watermark_pdf() with pikepdf.open(self.pdf_in) as pdf_main:",
"c.saveState() c.translate(500, 100) c.rotate(45) c.drawCentredString(pw / 2 - 50, ph - 400, self.wm_text)",
"pdf_in: str, pdf_out: str, text: str): self.pdf_in = pdf_in self.pdf_out = pdf_out self.pdf_watermark",
"def _create_watermark_pdf(self): c = canvas.Canvas(self.pdf_watermark) pdfmetrics.registerFont( TTFont('times new roman', 'Times New Roman.ttf')) c.setFont('times",
"= \"wm.pdf\" self.wm_font_size = 20 self.wm_text = text self.wm_alpha = 0.2 def apply(self):",
"import pikepdf from pikepdf import Pdf, PdfImage, Name from reportlab.pdfgen import canvas from",
"self.pdf_watermark = \"wm.pdf\" self.wm_font_size = 20 self.wm_text = text self.wm_alpha = 0.2 def",
"- 400, self.wm_text) c.restoreState() c.save() def main_cli(): parser = argparse.ArgumentParser() parser.add_argument( \"--input\", type=str,",
"self._create_watermark_pdf() with pikepdf.open(self.pdf_in) as pdf_main: with pikepdf.open(self.pdf_watermark) as pdf_wm: for page in pdf_main.pages:",
"c.drawCentredString(pw / 2 - 50, ph - 400, self.wm_text) c.restoreState() c.save() def main_cli():",
"pdf_wm: for page in pdf_main.pages: page.add_underlay(pdf_wm.pages[0]) pdf_main.save(self.pdf_out) def _create_watermark_pdf(self): c = canvas.Canvas(self.pdf_watermark) pdfmetrics.registerFont(",
"reportlab.pdfbase import pdfmetrics from reportlab.pdfbase.ttfonts import TTFont from reportlab.lib import units class PdfWatermark:",
"pdf_out: str, text: str): self.pdf_in = pdf_in self.pdf_out = pdf_out self.pdf_watermark = \"wm.pdf\"",
"self.wm_font_size) pw, ph = c._pagesize c.setFillGray(0.5, self.wm_alpha) c.saveState() c.translate(500, 100) c.rotate(45) c.drawCentredString(pw /",
"argparse.ArgumentParser() parser.add_argument( \"--input\", type=str, required=True, help=\"The PDF file in which will be inserted",
"\"--text\", type=str, required=True, help=\"The text of watermark\" ) args = parser.parse_args() srv =",
"sudo apt-get install libqpdf-dev \"\"\" import zlib import argparse import pikepdf from pikepdf",
"self.wm_font_size = 20 self.wm_text = text self.wm_alpha = 0.2 def apply(self): self._create_watermark_pdf() with",
"c.translate(500, 100) c.rotate(45) c.drawCentredString(pw / 2 - 50, ph - 400, self.wm_text) c.restoreState()",
"pdf_main.pages: page.add_underlay(pdf_wm.pages[0]) pdf_main.save(self.pdf_out) def _create_watermark_pdf(self): c = canvas.Canvas(self.pdf_watermark) pdfmetrics.registerFont( TTFont('times new roman', 'Times",
"reportlab.lib import units class PdfWatermark: def __init__(self, pdf_in: str, pdf_out: str, text: str):",
"which will be inserted watermark\" ) parser.add_argument( \"--out\", type=str, required=True, help=\"The PDF file",
"pdfmetrics from reportlab.pdfbase.ttfonts import TTFont from reportlab.lib import units class PdfWatermark: def __init__(self,",
"= 20 self.wm_text = text self.wm_alpha = 0.2 def apply(self): self._create_watermark_pdf() with pikepdf.open(self.pdf_in)",
"parser = argparse.ArgumentParser() parser.add_argument( \"--input\", type=str, required=True, help=\"The PDF file in which will",
"New Roman.ttf')) c.setFont('times new roman', self.wm_font_size) pw, ph = c._pagesize c.setFillGray(0.5, self.wm_alpha) c.saveState()",
"\"\"\" sudo apt-get install libqpdf-dev \"\"\" import zlib import argparse import pikepdf from",
"self.pdf_out = pdf_out self.pdf_watermark = \"wm.pdf\" self.wm_font_size = 20 self.wm_text = text self.wm_alpha",
"apply(self): self._create_watermark_pdf() with pikepdf.open(self.pdf_in) as pdf_main: with pikepdf.open(self.pdf_watermark) as pdf_wm: for page in",
"help=\"The PDF file in which will be saved result\" ) parser.add_argument( \"--text\", type=str,",
"= argparse.ArgumentParser() parser.add_argument( \"--input\", type=str, required=True, help=\"The PDF file in which will be",
"PDF file in which will be saved result\" ) parser.add_argument( \"--text\", type=str, required=True,",
"new roman', 'Times New Roman.ttf')) c.setFont('times new roman', self.wm_font_size) pw, ph = c._pagesize",
"\"--input\", type=str, required=True, help=\"The PDF file in which will be inserted watermark\" )",
"type=str, required=True, help=\"The PDF file in which will be inserted watermark\" ) parser.add_argument(",
"def __init__(self, pdf_in: str, pdf_out: str, text: str): self.pdf_in = pdf_in self.pdf_out =",
"file in which will be saved result\" ) parser.add_argument( \"--text\", type=str, required=True, help=\"The",
"roman', self.wm_font_size) pw, ph = c._pagesize c.setFillGray(0.5, self.wm_alpha) c.saveState() c.translate(500, 100) c.rotate(45) c.drawCentredString(pw",
"str): self.pdf_in = pdf_in self.pdf_out = pdf_out self.pdf_watermark = \"wm.pdf\" self.wm_font_size = 20",
"new roman', self.wm_font_size) pw, ph = c._pagesize c.setFillGray(0.5, self.wm_alpha) c.saveState() c.translate(500, 100) c.rotate(45)",
"self.wm_alpha = 0.2 def apply(self): self._create_watermark_pdf() with pikepdf.open(self.pdf_in) as pdf_main: with pikepdf.open(self.pdf_watermark) as",
"args = parser.parse_args() srv = PdfWatermark(args.input, args.out, args.text) srv.apply() if __name__ == \"__main__\":",
"text: str): self.pdf_in = pdf_in self.pdf_out = pdf_out self.pdf_watermark = \"wm.pdf\" self.wm_font_size =",
"c.restoreState() c.save() def main_cli(): parser = argparse.ArgumentParser() parser.add_argument( \"--input\", type=str, required=True, help=\"The PDF",
"reportlab.pdfbase.ttfonts import TTFont from reportlab.lib import units class PdfWatermark: def __init__(self, pdf_in: str,",
"import Pdf, PdfImage, Name from reportlab.pdfgen import canvas from reportlab.pdfbase import pdfmetrics from",
"pdf_out self.pdf_watermark = \"wm.pdf\" self.wm_font_size = 20 self.wm_text = text self.wm_alpha = 0.2",
"c.setFillGray(0.5, self.wm_alpha) c.saveState() c.translate(500, 100) c.rotate(45) c.drawCentredString(pw / 2 - 50, ph -",
"which will be saved result\" ) parser.add_argument( \"--text\", type=str, required=True, help=\"The text of",
"page.add_underlay(pdf_wm.pages[0]) pdf_main.save(self.pdf_out) def _create_watermark_pdf(self): c = canvas.Canvas(self.pdf_watermark) pdfmetrics.registerFont( TTFont('times new roman', 'Times New",
"parser.add_argument( \"--out\", type=str, required=True, help=\"The PDF file in which will be saved result\"",
"reportlab.pdfgen import canvas from reportlab.pdfbase import pdfmetrics from reportlab.pdfbase.ttfonts import TTFont from reportlab.lib",
"= 0.2 def apply(self): self._create_watermark_pdf() with pikepdf.open(self.pdf_in) as pdf_main: with pikepdf.open(self.pdf_watermark) as pdf_wm:",
"saved result\" ) parser.add_argument( \"--text\", type=str, required=True, help=\"The text of watermark\" ) args",
") parser.add_argument( \"--out\", type=str, required=True, help=\"The PDF file in which will be saved",
"= parser.parse_args() srv = PdfWatermark(args.input, args.out, args.text) srv.apply() if __name__ == \"__main__\": main_cli()",
"be saved result\" ) parser.add_argument( \"--text\", type=str, required=True, help=\"The text of watermark\" )",
"c.rotate(45) c.drawCentredString(pw / 2 - 50, ph - 400, self.wm_text) c.restoreState() c.save() def",
"will be inserted watermark\" ) parser.add_argument( \"--out\", type=str, required=True, help=\"The PDF file in",
"required=True, help=\"The PDF file in which will be inserted watermark\" ) parser.add_argument( \"--out\",",
"import units class PdfWatermark: def __init__(self, pdf_in: str, pdf_out: str, text: str): self.pdf_in",
"type=str, required=True, help=\"The PDF file in which will be saved result\" ) parser.add_argument(",
"from pikepdf import Pdf, PdfImage, Name from reportlab.pdfgen import canvas from reportlab.pdfbase import",
"pikepdf import Pdf, PdfImage, Name from reportlab.pdfgen import canvas from reportlab.pdfbase import pdfmetrics",
"required=True, help=\"The text of watermark\" ) args = parser.parse_args() srv = PdfWatermark(args.input, args.out,",
"in which will be inserted watermark\" ) parser.add_argument( \"--out\", type=str, required=True, help=\"The PDF",
"/ 2 - 50, ph - 400, self.wm_text) c.restoreState() c.save() def main_cli(): parser",
"text of watermark\" ) args = parser.parse_args() srv = PdfWatermark(args.input, args.out, args.text) srv.apply()",
"_create_watermark_pdf(self): c = canvas.Canvas(self.pdf_watermark) pdfmetrics.registerFont( TTFont('times new roman', 'Times New Roman.ttf')) c.setFont('times new",
"from reportlab.pdfbase import pdfmetrics from reportlab.pdfbase.ttfonts import TTFont from reportlab.lib import units class",
"100) c.rotate(45) c.drawCentredString(pw / 2 - 50, ph - 400, self.wm_text) c.restoreState() c.save()",
"self.pdf_in = pdf_in self.pdf_out = pdf_out self.pdf_watermark = \"wm.pdf\" self.wm_font_size = 20 self.wm_text",
"parser.add_argument( \"--text\", type=str, required=True, help=\"The text of watermark\" ) args = parser.parse_args() srv",
"type=str, required=True, help=\"The text of watermark\" ) args = parser.parse_args() srv = PdfWatermark(args.input,",
"of watermark\" ) args = parser.parse_args() srv = PdfWatermark(args.input, args.out, args.text) srv.apply() if",
"canvas.Canvas(self.pdf_watermark) pdfmetrics.registerFont( TTFont('times new roman', 'Times New Roman.ttf')) c.setFont('times new roman', self.wm_font_size) pw,",
"20 self.wm_text = text self.wm_alpha = 0.2 def apply(self): self._create_watermark_pdf() with pikepdf.open(self.pdf_in) as",
"= canvas.Canvas(self.pdf_watermark) pdfmetrics.registerFont( TTFont('times new roman', 'Times New Roman.ttf')) c.setFont('times new roman', self.wm_font_size)",
"pw, ph = c._pagesize c.setFillGray(0.5, self.wm_alpha) c.saveState() c.translate(500, 100) c.rotate(45) c.drawCentredString(pw / 2",
"text self.wm_alpha = 0.2 def apply(self): self._create_watermark_pdf() with pikepdf.open(self.pdf_in) as pdf_main: with pikepdf.open(self.pdf_watermark)",
"TTFont from reportlab.lib import units class PdfWatermark: def __init__(self, pdf_in: str, pdf_out: str,",
"pikepdf.open(self.pdf_watermark) as pdf_wm: for page in pdf_main.pages: page.add_underlay(pdf_wm.pages[0]) pdf_main.save(self.pdf_out) def _create_watermark_pdf(self): c =",
"2 - 50, ph - 400, self.wm_text) c.restoreState() c.save() def main_cli(): parser =",
"def apply(self): self._create_watermark_pdf() with pikepdf.open(self.pdf_in) as pdf_main: with pikepdf.open(self.pdf_watermark) as pdf_wm: for page",
"watermark\" ) args = parser.parse_args() srv = PdfWatermark(args.input, args.out, args.text) srv.apply() if __name__",
"PDF file in which will be inserted watermark\" ) parser.add_argument( \"--out\", type=str, required=True,",
"ph - 400, self.wm_text) c.restoreState() c.save() def main_cli(): parser = argparse.ArgumentParser() parser.add_argument( \"--input\",",
"file in which will be inserted watermark\" ) parser.add_argument( \"--out\", type=str, required=True, help=\"The",
"pikepdf from pikepdf import Pdf, PdfImage, Name from reportlab.pdfgen import canvas from reportlab.pdfbase",
"\"wm.pdf\" self.wm_font_size = 20 self.wm_text = text self.wm_alpha = 0.2 def apply(self): self._create_watermark_pdf()",
"= text self.wm_alpha = 0.2 def apply(self): self._create_watermark_pdf() with pikepdf.open(self.pdf_in) as pdf_main: with",
"ph = c._pagesize c.setFillGray(0.5, self.wm_alpha) c.saveState() c.translate(500, 100) c.rotate(45) c.drawCentredString(pw / 2 -",
"be inserted watermark\" ) parser.add_argument( \"--out\", type=str, required=True, help=\"The PDF file in which",
"install libqpdf-dev \"\"\" import zlib import argparse import pikepdf from pikepdf import Pdf,",
"with pikepdf.open(self.pdf_watermark) as pdf_wm: for page in pdf_main.pages: page.add_underlay(pdf_wm.pages[0]) pdf_main.save(self.pdf_out) def _create_watermark_pdf(self): c",
"\"--out\", type=str, required=True, help=\"The PDF file in which will be saved result\" )",
"from reportlab.pdfbase.ttfonts import TTFont from reportlab.lib import units class PdfWatermark: def __init__(self, pdf_in:",
"PdfWatermark: def __init__(self, pdf_in: str, pdf_out: str, text: str): self.pdf_in = pdf_in self.pdf_out",
"TTFont('times new roman', 'Times New Roman.ttf')) c.setFont('times new roman', self.wm_font_size) pw, ph =",
"self.wm_alpha) c.saveState() c.translate(500, 100) c.rotate(45) c.drawCentredString(pw / 2 - 50, ph - 400,",
"import canvas from reportlab.pdfbase import pdfmetrics from reportlab.pdfbase.ttfonts import TTFont from reportlab.lib import",
"__init__(self, pdf_in: str, pdf_out: str, text: str): self.pdf_in = pdf_in self.pdf_out = pdf_out",
"Name from reportlab.pdfgen import canvas from reportlab.pdfbase import pdfmetrics from reportlab.pdfbase.ttfonts import TTFont",
"import zlib import argparse import pikepdf from pikepdf import Pdf, PdfImage, Name from",
"= pdf_in self.pdf_out = pdf_out self.pdf_watermark = \"wm.pdf\" self.wm_font_size = 20 self.wm_text =",
"- 50, ph - 400, self.wm_text) c.restoreState() c.save() def main_cli(): parser = argparse.ArgumentParser()",
"zlib import argparse import pikepdf from pikepdf import Pdf, PdfImage, Name from reportlab.pdfgen",
"class PdfWatermark: def __init__(self, pdf_in: str, pdf_out: str, text: str): self.pdf_in = pdf_in",
"0.2 def apply(self): self._create_watermark_pdf() with pikepdf.open(self.pdf_in) as pdf_main: with pikepdf.open(self.pdf_watermark) as pdf_wm: for",
"str, text: str): self.pdf_in = pdf_in self.pdf_out = pdf_out self.pdf_watermark = \"wm.pdf\" self.wm_font_size",
"as pdf_wm: for page in pdf_main.pages: page.add_underlay(pdf_wm.pages[0]) pdf_main.save(self.pdf_out) def _create_watermark_pdf(self): c = canvas.Canvas(self.pdf_watermark)",
"def main_cli(): parser = argparse.ArgumentParser() parser.add_argument( \"--input\", type=str, required=True, help=\"The PDF file in",
"help=\"The PDF file in which will be inserted watermark\" ) parser.add_argument( \"--out\", type=str,",
"libqpdf-dev \"\"\" import zlib import argparse import pikepdf from pikepdf import Pdf, PdfImage,",
"argparse import pikepdf from pikepdf import Pdf, PdfImage, Name from reportlab.pdfgen import canvas",
"in pdf_main.pages: page.add_underlay(pdf_wm.pages[0]) pdf_main.save(self.pdf_out) def _create_watermark_pdf(self): c = canvas.Canvas(self.pdf_watermark) pdfmetrics.registerFont( TTFont('times new roman',",
"help=\"The text of watermark\" ) args = parser.parse_args() srv = PdfWatermark(args.input, args.out, args.text)",
"with pikepdf.open(self.pdf_in) as pdf_main: with pikepdf.open(self.pdf_watermark) as pdf_wm: for page in pdf_main.pages: page.add_underlay(pdf_wm.pages[0])",
"= c._pagesize c.setFillGray(0.5, self.wm_alpha) c.saveState() c.translate(500, 100) c.rotate(45) c.drawCentredString(pw / 2 - 50,",
"pdf_main.save(self.pdf_out) def _create_watermark_pdf(self): c = canvas.Canvas(self.pdf_watermark) pdfmetrics.registerFont( TTFont('times new roman', 'Times New Roman.ttf'))",
"from reportlab.lib import units class PdfWatermark: def __init__(self, pdf_in: str, pdf_out: str, text:",
"pdf_main: with pikepdf.open(self.pdf_watermark) as pdf_wm: for page in pdf_main.pages: page.add_underlay(pdf_wm.pages[0]) pdf_main.save(self.pdf_out) def _create_watermark_pdf(self):"
] |
[
"ccnc.algorithm import ccnc_statistic from ccnc.data import LexicalDataset, ShuffledVariant from ccnc.filters import AnySubsequenceFilter from",
"\"../clics-data/clics2-network-edges.txt\") any_bigram_filter = AnySubsequenceFilter(2) print(\"English:\") true_score = ccnc_statistic(eng_lexicon, network, any_bigram_filter, 2, True) num_samples",
"= Clics2Model(\"../clics-data/clics2-network-ids.txt\", \"../clics-data/clics2-network-edges.txt\") any_bigram_filter = AnySubsequenceFilter(2) print(\"English:\") true_score = ccnc_statistic(eng_lexicon, network, any_bigram_filter, 2,",
"range(0,num_samples): pseudo_eng = ShuffledVariant(eng_lexicon) resampled_score = ccnc_statistic(pseudo_eng, network, any_bigram_filter, 2, False) scores.append(resampled_score) if",
"\"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\", \"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"] if __name__ == '__main__': eng_lexicon = LexicalDataset(\"../eng-data/english.tsv\",english_segments) concepts = sorted(eng_lexicon.concept_to_forms.keys()) print(str(len(concepts))",
"__name__ == '__main__': eng_lexicon = LexicalDataset(\"../eng-data/english.tsv\",english_segments) concepts = sorted(eng_lexicon.concept_to_forms.keys()) print(str(len(concepts)) + \" concepts:",
"1 print(\"p-value: \" + str(num_scores_above_true_score/num_samples)) mu = sum(scores)/len(scores) sigma = statistics.stdev(scores) zscore =",
"mu = sum(scores)/len(scores) sigma = statistics.stdev(scores) zscore = (true_score - mu)/sigma print(\"z-score: \"",
"on English data from ccnc.algorithm import ccnc_statistic from ccnc.data import LexicalDataset, ShuffledVariant from",
"= AnySubsequenceFilter(2) print(\"English:\") true_score = ccnc_statistic(eng_lexicon, network, any_bigram_filter, 2, True) num_samples = 1000",
"and thereby what counts as a shared two-segment subsequence) english_segments = [\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\", \"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\",",
"two-segment subsequence) english_segments = [\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\", \"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\", \"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"] if __name__ == '__main__': eng_lexicon =",
"sigma = statistics.stdev(scores) zscore = (true_score - mu)/sigma print(\"z-score: \" + str(zscore)) print()",
"\" + str(num_scores_above_true_score/num_samples)) mu = sum(scores)/len(scores) sigma = statistics.stdev(scores) zscore = (true_score -",
"concepts: \" + str(concepts)) # load the CLICS2 network from the associated (data-specific)",
"if resampled_score > true_score: num_scores_above_true_score += 1 print(\"p-value: \" + str(num_scores_above_true_score/num_samples)) mu =",
"a shared two-segment subsequence) english_segments = [\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\", \"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\", \"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"] if __name__ == '__main__':",
"true_score = ccnc_statistic(eng_lexicon, network, any_bigram_filter, 2, True) num_samples = 1000 num_scores_above_true_score = 0",
"as a shared two-segment subsequence) english_segments = [\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\", \"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\", \"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"] if __name__ ==",
"\" + str(concepts)) # load the CLICS2 network from the associated (data-specific) model",
"= sum(scores)/len(scores) sigma = statistics.stdev(scores) zscore = (true_score - mu)/sigma print(\"z-score: \" +",
"from the associated (data-specific) model files network = Clics2Model(\"../clics-data/clics2-network-ids.txt\", \"../clics-data/clics2-network-edges.txt\") any_bigram_filter = AnySubsequenceFilter(2)",
"representation (determines tokenization, # and thereby what counts as a shared two-segment subsequence)",
"False) scores.append(resampled_score) if resampled_score > true_score: num_scores_above_true_score += 1 print(\"p-value: \" + str(num_scores_above_true_score/num_samples))",
"0 scores = list() for i in range(0,num_samples): pseudo_eng = ShuffledVariant(eng_lexicon) resampled_score =",
"resampled_score > true_score: num_scores_above_true_score += 1 print(\"p-value: \" + str(num_scores_above_true_score/num_samples)) mu = sum(scores)/len(scores)",
"\" concepts: \" + str(concepts)) # load the CLICS2 network from the associated",
"import LexicalDataset, ShuffledVariant from ccnc.filters import AnySubsequenceFilter from clics2.model import Clics2Model import statistics",
"= LexicalDataset(\"../eng-data/english.tsv\",english_segments) concepts = sorted(eng_lexicon.concept_to_forms.keys()) print(str(len(concepts)) + \" concepts: \" + str(concepts)) #",
"Clics2Model(\"../clics-data/clics2-network-ids.txt\", \"../clics-data/clics2-network-edges.txt\") any_bigram_filter = AnySubsequenceFilter(2) print(\"English:\") true_score = ccnc_statistic(eng_lexicon, network, any_bigram_filter, 2, True)",
"ccnc.filters import AnySubsequenceFilter from clics2.model import Clics2Model import statistics # define segments for",
"AnySubsequenceFilter from clics2.model import Clics2Model import statistics # define segments for English IPA",
"tokenization, # and thereby what counts as a shared two-segment subsequence) english_segments =",
"english_segments = [\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\", \"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\", \"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"] if __name__ == '__main__': eng_lexicon = LexicalDataset(\"../eng-data/english.tsv\",english_segments) concepts",
"TEST 1: Validation of shared bigram significance test on English data from ccnc.algorithm",
"if __name__ == '__main__': eng_lexicon = LexicalDataset(\"../eng-data/english.tsv\",english_segments) concepts = sorted(eng_lexicon.concept_to_forms.keys()) print(str(len(concepts)) + \"",
"load the CLICS2 network from the associated (data-specific) model files network = Clics2Model(\"../clics-data/clics2-network-ids.txt\",",
"= ccnc_statistic(pseudo_eng, network, any_bigram_filter, 2, False) scores.append(resampled_score) if resampled_score > true_score: num_scores_above_true_score +=",
"statistics # define segments for English IPA representation (determines tokenization, # and thereby",
"define segments for English IPA representation (determines tokenization, # and thereby what counts",
"+= 1 print(\"p-value: \" + str(num_scores_above_true_score/num_samples)) mu = sum(scores)/len(scores) sigma = statistics.stdev(scores) zscore",
"# and thereby what counts as a shared two-segment subsequence) english_segments = [\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\",",
"'__main__': eng_lexicon = LexicalDataset(\"../eng-data/english.tsv\",english_segments) concepts = sorted(eng_lexicon.concept_to_forms.keys()) print(str(len(concepts)) + \" concepts: \" +",
"list() for i in range(0,num_samples): pseudo_eng = ShuffledVariant(eng_lexicon) resampled_score = ccnc_statistic(pseudo_eng, network, any_bigram_filter,",
"bigram significance test on English data from ccnc.algorithm import ccnc_statistic from ccnc.data import",
"import Clics2Model import statistics # define segments for English IPA representation (determines tokenization,",
"what counts as a shared two-segment subsequence) english_segments = [\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\", \"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\", \"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"] if",
"Validation of shared bigram significance test on English data from ccnc.algorithm import ccnc_statistic",
"scores = list() for i in range(0,num_samples): pseudo_eng = ShuffledVariant(eng_lexicon) resampled_score = ccnc_statistic(pseudo_eng,",
"== '__main__': eng_lexicon = LexicalDataset(\"../eng-data/english.tsv\",english_segments) concepts = sorted(eng_lexicon.concept_to_forms.keys()) print(str(len(concepts)) + \" concepts: \"",
"counts as a shared two-segment subsequence) english_segments = [\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\", \"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\", \"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"] if __name__",
"(determines tokenization, # and thereby what counts as a shared two-segment subsequence) english_segments",
"2, False) scores.append(resampled_score) if resampled_score > true_score: num_scores_above_true_score += 1 print(\"p-value: \" +",
"associated (data-specific) model files network = Clics2Model(\"../clics-data/clics2-network-ids.txt\", \"../clics-data/clics2-network-edges.txt\") any_bigram_filter = AnySubsequenceFilter(2) print(\"English:\") true_score",
"+ str(num_scores_above_true_score/num_samples)) mu = sum(scores)/len(scores) sigma = statistics.stdev(scores) zscore = (true_score - mu)/sigma",
"# load the CLICS2 network from the associated (data-specific) model files network =",
"network = Clics2Model(\"../clics-data/clics2-network-ids.txt\", \"../clics-data/clics2-network-edges.txt\") any_bigram_filter = AnySubsequenceFilter(2) print(\"English:\") true_score = ccnc_statistic(eng_lexicon, network, any_bigram_filter,",
"shared bigram significance test on English data from ccnc.algorithm import ccnc_statistic from ccnc.data",
"True) num_samples = 1000 num_scores_above_true_score = 0 scores = list() for i in",
"= 1000 num_scores_above_true_score = 0 scores = list() for i in range(0,num_samples): pseudo_eng",
"= list() for i in range(0,num_samples): pseudo_eng = ShuffledVariant(eng_lexicon) resampled_score = ccnc_statistic(pseudo_eng, network,",
"English data from ccnc.algorithm import ccnc_statistic from ccnc.data import LexicalDataset, ShuffledVariant from ccnc.filters",
"import ccnc_statistic from ccnc.data import LexicalDataset, ShuffledVariant from ccnc.filters import AnySubsequenceFilter from clics2.model",
"str(num_scores_above_true_score/num_samples)) mu = sum(scores)/len(scores) sigma = statistics.stdev(scores) zscore = (true_score - mu)/sigma print(\"z-score:",
"<reponame>jdellert/ccnc<gh_stars>0 # TEST 1: Validation of shared bigram significance test on English data",
"eng_lexicon = LexicalDataset(\"../eng-data/english.tsv\",english_segments) concepts = sorted(eng_lexicon.concept_to_forms.keys()) print(str(len(concepts)) + \" concepts: \" + str(concepts))",
"resampled_score = ccnc_statistic(pseudo_eng, network, any_bigram_filter, 2, False) scores.append(resampled_score) if resampled_score > true_score: num_scores_above_true_score",
"the CLICS2 network from the associated (data-specific) model files network = Clics2Model(\"../clics-data/clics2-network-ids.txt\", \"../clics-data/clics2-network-edges.txt\")",
"significance test on English data from ccnc.algorithm import ccnc_statistic from ccnc.data import LexicalDataset,",
"ccnc_statistic(eng_lexicon, network, any_bigram_filter, 2, True) num_samples = 1000 num_scores_above_true_score = 0 scores =",
"AnySubsequenceFilter(2) print(\"English:\") true_score = ccnc_statistic(eng_lexicon, network, any_bigram_filter, 2, True) num_samples = 1000 num_scores_above_true_score",
"sorted(eng_lexicon.concept_to_forms.keys()) print(str(len(concepts)) + \" concepts: \" + str(concepts)) # load the CLICS2 network",
"the associated (data-specific) model files network = Clics2Model(\"../clics-data/clics2-network-ids.txt\", \"../clics-data/clics2-network-edges.txt\") any_bigram_filter = AnySubsequenceFilter(2) print(\"English:\")",
"import statistics # define segments for English IPA representation (determines tokenization, # and",
"from ccnc.filters import AnySubsequenceFilter from clics2.model import Clics2Model import statistics # define segments",
"print(\"p-value: \" + str(num_scores_above_true_score/num_samples)) mu = sum(scores)/len(scores) sigma = statistics.stdev(scores) zscore = (true_score",
"LexicalDataset(\"../eng-data/english.tsv\",english_segments) concepts = sorted(eng_lexicon.concept_to_forms.keys()) print(str(len(concepts)) + \" concepts: \" + str(concepts)) # load",
"of shared bigram significance test on English data from ccnc.algorithm import ccnc_statistic from",
"any_bigram_filter, 2, True) num_samples = 1000 num_scores_above_true_score = 0 scores = list() for",
"data from ccnc.algorithm import ccnc_statistic from ccnc.data import LexicalDataset, ShuffledVariant from ccnc.filters import",
"from ccnc.data import LexicalDataset, ShuffledVariant from ccnc.filters import AnySubsequenceFilter from clics2.model import Clics2Model",
"1: Validation of shared bigram significance test on English data from ccnc.algorithm import",
"+ str(concepts)) # load the CLICS2 network from the associated (data-specific) model files",
"ShuffledVariant from ccnc.filters import AnySubsequenceFilter from clics2.model import Clics2Model import statistics # define",
"English IPA representation (determines tokenization, # and thereby what counts as a shared",
"for i in range(0,num_samples): pseudo_eng = ShuffledVariant(eng_lexicon) resampled_score = ccnc_statistic(pseudo_eng, network, any_bigram_filter, 2,",
"num_scores_above_true_score += 1 print(\"p-value: \" + str(num_scores_above_true_score/num_samples)) mu = sum(scores)/len(scores) sigma = statistics.stdev(scores)",
"thereby what counts as a shared two-segment subsequence) english_segments = [\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\", \"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\", \"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"]",
"clics2.model import Clics2Model import statistics # define segments for English IPA representation (determines",
"shared two-segment subsequence) english_segments = [\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\", \"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\", \"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"] if __name__ == '__main__': eng_lexicon",
"ccnc_statistic(pseudo_eng, network, any_bigram_filter, 2, False) scores.append(resampled_score) if resampled_score > true_score: num_scores_above_true_score += 1",
"IPA representation (determines tokenization, # and thereby what counts as a shared two-segment",
"num_scores_above_true_score = 0 scores = list() for i in range(0,num_samples): pseudo_eng = ShuffledVariant(eng_lexicon)",
"scores.append(resampled_score) if resampled_score > true_score: num_scores_above_true_score += 1 print(\"p-value: \" + str(num_scores_above_true_score/num_samples)) mu",
"2, True) num_samples = 1000 num_scores_above_true_score = 0 scores = list() for i",
"ccnc.data import LexicalDataset, ShuffledVariant from ccnc.filters import AnySubsequenceFilter from clics2.model import Clics2Model import",
"# TEST 1: Validation of shared bigram significance test on English data from",
"concepts = sorted(eng_lexicon.concept_to_forms.keys()) print(str(len(concepts)) + \" concepts: \" + str(concepts)) # load the",
"files network = Clics2Model(\"../clics-data/clics2-network-ids.txt\", \"../clics-data/clics2-network-edges.txt\") any_bigram_filter = AnySubsequenceFilter(2) print(\"English:\") true_score = ccnc_statistic(eng_lexicon, network,",
"= ShuffledVariant(eng_lexicon) resampled_score = ccnc_statistic(pseudo_eng, network, any_bigram_filter, 2, False) scores.append(resampled_score) if resampled_score >",
"= [\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\", \"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\", \"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"] if __name__ == '__main__': eng_lexicon = LexicalDataset(\"../eng-data/english.tsv\",english_segments) concepts =",
"print(\"English:\") true_score = ccnc_statistic(eng_lexicon, network, any_bigram_filter, 2, True) num_samples = 1000 num_scores_above_true_score =",
"subsequence) english_segments = [\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\", \"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\", \"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"] if __name__ == '__main__': eng_lexicon = LexicalDataset(\"../eng-data/english.tsv\",english_segments)",
"test on English data from ccnc.algorithm import ccnc_statistic from ccnc.data import LexicalDataset, ShuffledVariant",
"in range(0,num_samples): pseudo_eng = ShuffledVariant(eng_lexicon) resampled_score = ccnc_statistic(pseudo_eng, network, any_bigram_filter, 2, False) scores.append(resampled_score)",
"ccnc_statistic from ccnc.data import LexicalDataset, ShuffledVariant from ccnc.filters import AnySubsequenceFilter from clics2.model import",
"> true_score: num_scores_above_true_score += 1 print(\"p-value: \" + str(num_scores_above_true_score/num_samples)) mu = sum(scores)/len(scores) sigma",
"true_score: num_scores_above_true_score += 1 print(\"p-value: \" + str(num_scores_above_true_score/num_samples)) mu = sum(scores)/len(scores) sigma =",
"= ccnc_statistic(eng_lexicon, network, any_bigram_filter, 2, True) num_samples = 1000 num_scores_above_true_score = 0 scores",
"network from the associated (data-specific) model files network = Clics2Model(\"../clics-data/clics2-network-ids.txt\", \"../clics-data/clics2-network-edges.txt\") any_bigram_filter =",
"model files network = Clics2Model(\"../clics-data/clics2-network-ids.txt\", \"../clics-data/clics2-network-edges.txt\") any_bigram_filter = AnySubsequenceFilter(2) print(\"English:\") true_score = ccnc_statistic(eng_lexicon,",
"i in range(0,num_samples): pseudo_eng = ShuffledVariant(eng_lexicon) resampled_score = ccnc_statistic(pseudo_eng, network, any_bigram_filter, 2, False)",
"[\"a\",\"ɑ\",\"ɒ\",\"ɑː\",\"æ\",\"ʌ\",\"aʊ\",\"b\",\"d\",\"dʒ\",\"ð\",\"ɛ\",\"ə\",\"eɪ\",\"ɛə\",\"f\", \"g\",\"h\",\"ɪ\",\"i\",\"iː\",\"aɪ\",\"ɪə\",\"j\",\"k\",\"l\",\"m\",\"n\",\"ŋ\",\"ɔː\",\"əʊ\",\"ɔɪ\", \"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"] if __name__ == '__main__': eng_lexicon = LexicalDataset(\"../eng-data/english.tsv\",english_segments) concepts = sorted(eng_lexicon.concept_to_forms.keys())",
"\"p\",\"r\",\"s\",\"ʃ\",\"t\",\"tʃ\",\"θ\",\"u\",\"ʊ\",\"uː\",\"ɜː\",\"ʊə\",\"w\",\"v\",\"z\"] if __name__ == '__main__': eng_lexicon = LexicalDataset(\"../eng-data/english.tsv\",english_segments) concepts = sorted(eng_lexicon.concept_to_forms.keys()) print(str(len(concepts)) +",
"Clics2Model import statistics # define segments for English IPA representation (determines tokenization, #",
"pseudo_eng = ShuffledVariant(eng_lexicon) resampled_score = ccnc_statistic(pseudo_eng, network, any_bigram_filter, 2, False) scores.append(resampled_score) if resampled_score",
"(data-specific) model files network = Clics2Model(\"../clics-data/clics2-network-ids.txt\", \"../clics-data/clics2-network-edges.txt\") any_bigram_filter = AnySubsequenceFilter(2) print(\"English:\") true_score =",
"from clics2.model import Clics2Model import statistics # define segments for English IPA representation",
"for English IPA representation (determines tokenization, # and thereby what counts as a",
"= sorted(eng_lexicon.concept_to_forms.keys()) print(str(len(concepts)) + \" concepts: \" + str(concepts)) # load the CLICS2",
"1000 num_scores_above_true_score = 0 scores = list() for i in range(0,num_samples): pseudo_eng =",
"print(str(len(concepts)) + \" concepts: \" + str(concepts)) # load the CLICS2 network from",
"network, any_bigram_filter, 2, True) num_samples = 1000 num_scores_above_true_score = 0 scores = list()",
"any_bigram_filter, 2, False) scores.append(resampled_score) if resampled_score > true_score: num_scores_above_true_score += 1 print(\"p-value: \"",
"+ \" concepts: \" + str(concepts)) # load the CLICS2 network from the",
"# define segments for English IPA representation (determines tokenization, # and thereby what",
"ShuffledVariant(eng_lexicon) resampled_score = ccnc_statistic(pseudo_eng, network, any_bigram_filter, 2, False) scores.append(resampled_score) if resampled_score > true_score:",
"from ccnc.algorithm import ccnc_statistic from ccnc.data import LexicalDataset, ShuffledVariant from ccnc.filters import AnySubsequenceFilter",
"network, any_bigram_filter, 2, False) scores.append(resampled_score) if resampled_score > true_score: num_scores_above_true_score += 1 print(\"p-value:",
"sum(scores)/len(scores) sigma = statistics.stdev(scores) zscore = (true_score - mu)/sigma print(\"z-score: \" + str(zscore))",
"num_samples = 1000 num_scores_above_true_score = 0 scores = list() for i in range(0,num_samples):",
"= 0 scores = list() for i in range(0,num_samples): pseudo_eng = ShuffledVariant(eng_lexicon) resampled_score",
"any_bigram_filter = AnySubsequenceFilter(2) print(\"English:\") true_score = ccnc_statistic(eng_lexicon, network, any_bigram_filter, 2, True) num_samples =",
"segments for English IPA representation (determines tokenization, # and thereby what counts as",
"LexicalDataset, ShuffledVariant from ccnc.filters import AnySubsequenceFilter from clics2.model import Clics2Model import statistics #",
"import AnySubsequenceFilter from clics2.model import Clics2Model import statistics # define segments for English",
"str(concepts)) # load the CLICS2 network from the associated (data-specific) model files network",
"CLICS2 network from the associated (data-specific) model files network = Clics2Model(\"../clics-data/clics2-network-ids.txt\", \"../clics-data/clics2-network-edges.txt\") any_bigram_filter"
] |
[
"kernel_size, dilation=1, activation=nn.ReLU, separable=True): super().__init__() tcsconv = [] if separable: tcsconv += [",
"5), ResidualTCSConvBlock(512, 512, 63, 5), ResidualTCSConvBlock(512, 512, 75, 5), TCSConv(512, 512, 87, dilation=2),",
"kernel_size, num_blocks, activation=nn.ReLU): super().__init__() self.num_blocks = num_blocks self.layers = [] for i in",
"dilation=dilation), nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same', dilation=dilation) ] else: tcsconv += [ nn.Conv1d(in_channels, out_channels,",
"39, 5), ResidualTCSConvBlock(256, 512, 51, 5), ResidualTCSConvBlock(512, 512, 63, 5), ResidualTCSConvBlock(512, 512, 75,",
"torch import nn from torch.nn import Sequential from hw_asr.base import BaseModel class TCSConv(nn.Module):",
"TCSConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, activation=nn.ReLU, separable=True): super().__init__() tcsconv = []",
"activation=activation)) self.layers = nn.Sequential(*self.layers) self.res_block = nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same'), nn.BatchNorm1d(out_channels)) self.last_activation =",
"__init__(self, in_channels, out_channels, kernel_size, num_blocks, activation=nn.ReLU): super().__init__() self.num_blocks = num_blocks self.layers = []",
"nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same', dilation=dilation) ] else: tcsconv += [ nn.Conv1d(in_channels, out_channels, kernel_size,",
"padding='same', dilation=dilation) ] else: tcsconv += [ nn.Conv1d(in_channels, out_channels, kernel_size, padding='same', dilation=dilation) ]",
"kernel_size=1, padding='same', dilation=dilation) ] else: tcsconv += [ nn.Conv1d(in_channels, out_channels, kernel_size, padding='same', dilation=dilation)",
"in_channels, kernel_size, activation=activation)) self.layers = nn.Sequential(*self.layers) self.res_block = nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same'), nn.BatchNorm1d(out_channels))",
"kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), TCSConv(256, 256, 1, separable=False), nn.Conv1d(256, n_class,",
"kernel_size, padding='same', groups=in_channels, dilation=dilation), nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same', dilation=dilation) ] else: tcsconv +=",
"+= [ nn.Conv1d(in_channels, out_channels, kernel_size, padding='same', dilation=dilation) ] tcsconv.append(nn.BatchNorm1d(out_channels)) if activation is not",
"separable=True): super().__init__() tcsconv = [] if separable: tcsconv += [ nn.Conv1d(in_channels, in_channels, kernel_size,",
"activation=None)) continue self.layers.append(TCSConv(in_channels, in_channels, kernel_size, activation=activation)) self.layers = nn.Sequential(*self.layers) self.res_block = nn.Sequential(nn.Conv1d(in_channels, out_channels,",
"**kwargs) self.model = nn.Sequential(* [ nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33,",
"dilation=dilation) ] else: tcsconv += [ nn.Conv1d(in_channels, out_channels, kernel_size, padding='same', dilation=dilation) ] tcsconv.append(nn.BatchNorm1d(out_channels))",
"def __init__(self, in_channels, out_channels, kernel_size, num_blocks, activation=nn.ReLU): super().__init__() self.num_blocks = num_blocks self.layers =",
"= activation() def forward(self, x): y = self.layers(x) return self.last_activation(self.res_block(x) + y) class",
") def forward(self, spectrogram, *args, **kwargs): res = self.model(spectrogram.transpose(2, 1)).transpose(1, 2) return res",
"n_class, *args, **kwargs): super().__init__(n_feats, n_class, *args, **kwargs) self.model = nn.Sequential(* [ nn.Conv1d(n_feats, out_channels=256,",
"n_class, kernel_size=1, padding='same') ] ) def forward(self, spectrogram, *args, **kwargs): res = self.model(spectrogram.transpose(2,",
"5), TCSConv(512, 512, 87, dilation=2), TCSConv(512, 1024, 1, separable=False), nn.Conv1d(1024, n_class, kernel_size=1, padding='same')",
"tcsconv = [] if separable: tcsconv += [ nn.Conv1d(in_channels, in_channels, kernel_size, padding='same', groups=in_channels,",
"kernel_size, padding='same', dilation=dilation) ] tcsconv.append(nn.BatchNorm1d(out_channels)) if activation is not None: tcsconv.append(activation()) self.tcsconv =",
"padding='same') ] ) def forward(self, spectrogram, *args, **kwargs): res = self.model(spectrogram.transpose(2, 1)).transpose(1, 2)",
"return res def transform_input_lengths(self, input_lengths): return input_lengths // 2 class QuartznetSmall(BaseModel): def __init__(self,",
"== num_blocks: self.layers.append(TCSConv(in_channels, out_channels, kernel_size, activation=None)) continue self.layers.append(TCSConv(in_channels, in_channels, kernel_size, activation=activation)) self.layers =",
"forward(self, x): return self.tcsconv(x) class ResidualTCSConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, num_blocks, activation=nn.ReLU):",
"dilation=2), TCSConv(512, 1024, 1, separable=False), nn.Conv1d(1024, n_class, kernel_size=1, padding='same') ] ) def forward(self,",
"33, 5), ResidualTCSConvBlock(256, 256, 39, 5), ResidualTCSConvBlock(256, 512, 51, 5), ResidualTCSConvBlock(512, 512, 63,",
"separable=False), nn.Conv1d(256, n_class, kernel_size=1, padding='same') ] ) def forward(self, spectrogram, *args, **kwargs): res",
"51, 5), ResidualTCSConvBlock(512, 512, 63, 5), ResidualTCSConvBlock(512, 512, 75, 5), TCSConv(512, 512, 87,",
"super().__init__(n_feats, n_class, *args, **kwargs) self.model = nn.Sequential(* [ nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16),",
"nn.Sequential(* [ nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), TCSConv(256, 256,",
"activation=nn.ReLU): super().__init__() self.num_blocks = num_blocks self.layers = [] for i in range(num_blocks): if",
"self.layers.append(TCSConv(in_channels, in_channels, kernel_size, activation=activation)) self.layers = nn.Sequential(*self.layers) self.res_block = nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same'),",
"num_blocks self.layers = [] for i in range(num_blocks): if i + 1 ==",
"nn.Conv1d(in_channels, in_channels, kernel_size, padding='same', groups=in_channels, dilation=dilation), nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same', dilation=dilation) ] else:",
"torch.nn import Sequential from hw_asr.base import BaseModel class TCSConv(nn.Module): def __init__(self, in_channels, out_channels,",
"nn.Conv1d(in_channels, out_channels, kernel_size, padding='same', dilation=dilation) ] tcsconv.append(nn.BatchNorm1d(out_channels)) if activation is not None: tcsconv.append(activation())",
"self.num_blocks = num_blocks self.layers = [] for i in range(num_blocks): if i +",
"2 class QuartznetSmall(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class, *args, **kwargs)",
"= [] if separable: tcsconv += [ nn.Conv1d(in_channels, in_channels, kernel_size, padding='same', groups=in_channels, dilation=dilation),",
"spectrogram, *args, **kwargs): res = self.model(spectrogram.transpose(2, 1)).transpose(1, 2) return res def transform_input_lengths(self, input_lengths):",
"class TCSConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, activation=nn.ReLU, separable=True): super().__init__() tcsconv =",
"def forward(self, x): return self.tcsconv(x) class ResidualTCSConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, num_blocks,",
"512, 63, 5), ResidualTCSConvBlock(512, 512, 75, 5), TCSConv(512, 512, 87, dilation=2), TCSConv(512, 1024,",
"import Sequential from hw_asr.base import BaseModel class TCSConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size,",
"separable=False), nn.Conv1d(1024, n_class, kernel_size=1, padding='same') ] ) def forward(self, spectrogram, *args, **kwargs): res",
"self.layers.append(TCSConv(in_channels, out_channels, kernel_size, activation=None)) continue self.layers.append(TCSConv(in_channels, in_channels, kernel_size, activation=activation)) self.layers = nn.Sequential(*self.layers) self.res_block",
"self.last_activation(self.res_block(x) + y) class Quartznet(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class,",
"ResidualTCSConvBlock(256, 256, 33, 5), ResidualTCSConvBlock(256, 256, 39, 5), ResidualTCSConvBlock(256, 512, 51, 5), ResidualTCSConvBlock(512,",
"[ nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), ResidualTCSConvBlock(256, 256, 39,",
"import nn from torch.nn import Sequential from hw_asr.base import BaseModel class TCSConv(nn.Module): def",
"input_lengths): return input_lengths // 2 class QuartznetSmall(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs):",
"out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), ResidualTCSConvBlock(256, 256, 39, 5), ResidualTCSConvBlock(256,",
"[] for i in range(num_blocks): if i + 1 == num_blocks: self.layers.append(TCSConv(in_channels, out_channels,",
"super().__init__() self.num_blocks = num_blocks self.layers = [] for i in range(num_blocks): if i",
"ResidualTCSConvBlock(256, 256, 33, 5), TCSConv(256, 256, 1, separable=False), nn.Conv1d(256, n_class, kernel_size=1, padding='same') ]",
"256, 33, 5), TCSConv(256, 256, 1, separable=False), nn.Conv1d(256, n_class, kernel_size=1, padding='same') ] )",
"range(num_blocks): if i + 1 == num_blocks: self.layers.append(TCSConv(in_channels, out_channels, kernel_size, activation=None)) continue self.layers.append(TCSConv(in_channels,",
"2) return res def transform_input_lengths(self, input_lengths): return input_lengths // 2 class QuartznetSmall(BaseModel): def",
"self.layers(x) return self.last_activation(self.res_block(x) + y) class Quartznet(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs):",
"i + 1 == num_blocks: self.layers.append(TCSConv(in_channels, out_channels, kernel_size, activation=None)) continue self.layers.append(TCSConv(in_channels, in_channels, kernel_size,",
"= [] for i in range(num_blocks): if i + 1 == num_blocks: self.layers.append(TCSConv(in_channels,",
"**kwargs): res = self.model(spectrogram.transpose(2, 1)).transpose(1, 2) return res def transform_input_lengths(self, input_lengths): return input_lengths",
"63, 5), ResidualTCSConvBlock(512, 512, 75, 5), TCSConv(512, 512, 87, dilation=2), TCSConv(512, 1024, 1,",
"+ 1 == num_blocks: self.layers.append(TCSConv(in_channels, out_channels, kernel_size, activation=None)) continue self.layers.append(TCSConv(in_channels, in_channels, kernel_size, activation=activation))",
"self.tcsconv(x) class ResidualTCSConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, num_blocks, activation=nn.ReLU): super().__init__() self.num_blocks =",
"= self.layers(x) return self.last_activation(self.res_block(x) + y) class Quartznet(BaseModel): def __init__(self, n_feats, n_class, *args,",
"separable: tcsconv += [ nn.Conv1d(in_channels, in_channels, kernel_size, padding='same', groups=in_channels, dilation=dilation), nn.Conv1d(in_channels, out_channels, kernel_size=1,",
"in range(num_blocks): if i + 1 == num_blocks: self.layers.append(TCSConv(in_channels, out_channels, kernel_size, activation=None)) continue",
"ResidualTCSConvBlock(512, 512, 63, 5), ResidualTCSConvBlock(512, 512, 75, 5), TCSConv(512, 512, 87, dilation=2), TCSConv(512,",
"1, separable=False), nn.Conv1d(1024, n_class, kernel_size=1, padding='same') ] ) def forward(self, spectrogram, *args, **kwargs):",
"out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), TCSConv(256, 256, 1, separable=False), nn.Conv1d(256,",
"= nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same'), nn.BatchNorm1d(out_channels)) self.last_activation = activation() def forward(self, x): y",
"kernel_size=1, padding='same'), nn.BatchNorm1d(out_channels)) self.last_activation = activation() def forward(self, x): y = self.layers(x) return",
"y) class Quartznet(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class, *args, **kwargs)",
"ResidualTCSConvBlock(256, 512, 51, 5), ResidualTCSConvBlock(512, 512, 63, 5), ResidualTCSConvBlock(512, 512, 75, 5), TCSConv(512,",
"n_class, *args, **kwargs) self.model = nn.Sequential(* [ nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256,",
"= nn.Sequential(* [ nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), TCSConv(256,",
"+= [ nn.Conv1d(in_channels, in_channels, kernel_size, padding='same', groups=in_channels, dilation=dilation), nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same', dilation=dilation)",
"tcsconv.append(nn.BatchNorm1d(out_channels)) if activation is not None: tcsconv.append(activation()) self.tcsconv = nn.Sequential(*tcsconv) def forward(self, x):",
"__init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class, *args, **kwargs) self.model = nn.Sequential(* [",
"from hw_asr.base import BaseModel class TCSConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, activation=nn.ReLU,",
"class Quartznet(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class, *args, **kwargs) self.model",
"self.tcsconv = nn.Sequential(*tcsconv) def forward(self, x): return self.tcsconv(x) class ResidualTCSConvBlock(nn.Module): def __init__(self, in_channels,",
"for i in range(num_blocks): if i + 1 == num_blocks: self.layers.append(TCSConv(in_channels, out_channels, kernel_size,",
"out_channels, kernel_size, activation=None)) continue self.layers.append(TCSConv(in_channels, in_channels, kernel_size, activation=activation)) self.layers = nn.Sequential(*self.layers) self.res_block =",
"else: tcsconv += [ nn.Conv1d(in_channels, out_channels, kernel_size, padding='same', dilation=dilation) ] tcsconv.append(nn.BatchNorm1d(out_channels)) if activation",
"forward(self, x): y = self.layers(x) return self.last_activation(self.res_block(x) + y) class Quartznet(BaseModel): def __init__(self,",
"256, 33, 5), ResidualTCSConvBlock(256, 256, 39, 5), ResidualTCSConvBlock(256, 512, 51, 5), ResidualTCSConvBlock(512, 512,",
"<reponame>art591/dla_asr<gh_stars>0 from torch import nn from torch.nn import Sequential from hw_asr.base import BaseModel",
"x): y = self.layers(x) return self.last_activation(self.res_block(x) + y) class Quartznet(BaseModel): def __init__(self, n_feats,",
"1024, 1, separable=False), nn.Conv1d(1024, n_class, kernel_size=1, padding='same') ] ) def forward(self, spectrogram, *args,",
"= nn.Sequential(* [ nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), ResidualTCSConvBlock(256,",
"padding=16), ResidualTCSConvBlock(256, 256, 33, 5), ResidualTCSConvBlock(256, 256, 39, 5), ResidualTCSConvBlock(256, 512, 51, 5),",
"if separable: tcsconv += [ nn.Conv1d(in_channels, in_channels, kernel_size, padding='same', groups=in_channels, dilation=dilation), nn.Conv1d(in_channels, out_channels,",
"] tcsconv.append(nn.BatchNorm1d(out_channels)) if activation is not None: tcsconv.append(activation()) self.tcsconv = nn.Sequential(*tcsconv) def forward(self,",
"num_blocks, activation=nn.ReLU): super().__init__() self.num_blocks = num_blocks self.layers = [] for i in range(num_blocks):",
"kernel_size, activation=activation)) self.layers = nn.Sequential(*self.layers) self.res_block = nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same'), nn.BatchNorm1d(out_channels)) self.last_activation",
"BaseModel class TCSConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, activation=nn.ReLU, separable=True): super().__init__() tcsconv",
"if activation is not None: tcsconv.append(activation()) self.tcsconv = nn.Sequential(*tcsconv) def forward(self, x): return",
"nn.Sequential(*tcsconv) def forward(self, x): return self.tcsconv(x) class ResidualTCSConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size,",
"class ResidualTCSConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, num_blocks, activation=nn.ReLU): super().__init__() self.num_blocks = num_blocks",
"1, separable=False), nn.Conv1d(256, n_class, kernel_size=1, padding='same') ] ) def forward(self, spectrogram, *args, **kwargs):",
"[ nn.Conv1d(in_channels, out_channels, kernel_size, padding='same', dilation=dilation) ] tcsconv.append(nn.BatchNorm1d(out_channels)) if activation is not None:",
"1 == num_blocks: self.layers.append(TCSConv(in_channels, out_channels, kernel_size, activation=None)) continue self.layers.append(TCSConv(in_channels, in_channels, kernel_size, activation=activation)) self.layers",
"def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class, *args, **kwargs) self.model = nn.Sequential(*",
"= num_blocks self.layers = [] for i in range(num_blocks): if i + 1",
"padding='same'), nn.BatchNorm1d(out_channels)) self.last_activation = activation() def forward(self, x): y = self.layers(x) return self.last_activation(self.res_block(x)",
"class QuartznetSmall(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class, *args, **kwargs) self.model",
"groups=in_channels, dilation=dilation), nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same', dilation=dilation) ] else: tcsconv += [ nn.Conv1d(in_channels,",
"+ y) class Quartznet(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class, *args,",
"is not None: tcsconv.append(activation()) self.tcsconv = nn.Sequential(*tcsconv) def forward(self, x): return self.tcsconv(x) class",
"continue self.layers.append(TCSConv(in_channels, in_channels, kernel_size, activation=activation)) self.layers = nn.Sequential(*self.layers) self.res_block = nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1,",
"] else: tcsconv += [ nn.Conv1d(in_channels, out_channels, kernel_size, padding='same', dilation=dilation) ] tcsconv.append(nn.BatchNorm1d(out_channels)) if",
"5), ResidualTCSConvBlock(512, 512, 75, 5), TCSConv(512, 512, 87, dilation=2), TCSConv(512, 1024, 1, separable=False),",
"in_channels, out_channels, kernel_size, dilation=1, activation=nn.ReLU, separable=True): super().__init__() tcsconv = [] if separable: tcsconv",
"padding='same', dilation=dilation) ] tcsconv.append(nn.BatchNorm1d(out_channels)) if activation is not None: tcsconv.append(activation()) self.tcsconv = nn.Sequential(*tcsconv)",
"None: tcsconv.append(activation()) self.tcsconv = nn.Sequential(*tcsconv) def forward(self, x): return self.tcsconv(x) class ResidualTCSConvBlock(nn.Module): def",
"**kwargs): super().__init__(n_feats, n_class, *args, **kwargs) self.model = nn.Sequential(* [ nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2,",
"nn from torch.nn import Sequential from hw_asr.base import BaseModel class TCSConv(nn.Module): def __init__(self,",
"QuartznetSmall(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class, *args, **kwargs) self.model =",
"from torch import nn from torch.nn import Sequential from hw_asr.base import BaseModel class",
"512, 51, 5), ResidualTCSConvBlock(512, 512, 63, 5), ResidualTCSConvBlock(512, 512, 75, 5), TCSConv(512, 512,",
"hw_asr.base import BaseModel class TCSConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, activation=nn.ReLU, separable=True):",
"padding='same', groups=in_channels, dilation=dilation), nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same', dilation=dilation) ] else: tcsconv += [",
"tcsconv.append(activation()) self.tcsconv = nn.Sequential(*tcsconv) def forward(self, x): return self.tcsconv(x) class ResidualTCSConvBlock(nn.Module): def __init__(self,",
"stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), TCSConv(256, 256, 1, separable=False), nn.Conv1d(256, n_class, kernel_size=1,",
"self.res_block = nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same'), nn.BatchNorm1d(out_channels)) self.last_activation = activation() def forward(self, x):",
"stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), ResidualTCSConvBlock(256, 256, 39, 5), ResidualTCSConvBlock(256, 512, 51,",
"i in range(num_blocks): if i + 1 == num_blocks: self.layers.append(TCSConv(in_channels, out_channels, kernel_size, activation=None))",
"256, 39, 5), ResidualTCSConvBlock(256, 512, 51, 5), ResidualTCSConvBlock(512, 512, 63, 5), ResidualTCSConvBlock(512, 512,",
"def forward(self, x): y = self.layers(x) return self.last_activation(self.res_block(x) + y) class Quartznet(BaseModel): def",
"tcsconv += [ nn.Conv1d(in_channels, out_channels, kernel_size, padding='same', dilation=dilation) ] tcsconv.append(nn.BatchNorm1d(out_channels)) if activation is",
"nn.Conv1d(1024, n_class, kernel_size=1, padding='same') ] ) def forward(self, spectrogram, *args, **kwargs): res =",
"out_channels, kernel_size, dilation=1, activation=nn.ReLU, separable=True): super().__init__() tcsconv = [] if separable: tcsconv +=",
"dilation=dilation) ] tcsconv.append(nn.BatchNorm1d(out_channels)) if activation is not None: tcsconv.append(activation()) self.tcsconv = nn.Sequential(*tcsconv) def",
"5), TCSConv(256, 256, 1, separable=False), nn.Conv1d(256, n_class, kernel_size=1, padding='same') ] ) def forward(self,",
"forward(self, spectrogram, *args, **kwargs): res = self.model(spectrogram.transpose(2, 1)).transpose(1, 2) return res def transform_input_lengths(self,",
"[] if separable: tcsconv += [ nn.Conv1d(in_channels, in_channels, kernel_size, padding='same', groups=in_channels, dilation=dilation), nn.Conv1d(in_channels,",
"nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), ResidualTCSConvBlock(256, 256, 39, 5),",
"kernel_size=1, padding='same') ] ) def forward(self, spectrogram, *args, **kwargs): res = self.model(spectrogram.transpose(2, 1)).transpose(1,",
"x): return self.tcsconv(x) class ResidualTCSConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, num_blocks, activation=nn.ReLU): super().__init__()",
"out_channels, kernel_size=1, padding='same'), nn.BatchNorm1d(out_channels)) self.last_activation = activation() def forward(self, x): y = self.layers(x)",
"return input_lengths // 2 class QuartznetSmall(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats,",
"activation is not None: tcsconv.append(activation()) self.tcsconv = nn.Sequential(*tcsconv) def forward(self, x): return self.tcsconv(x)",
"] ) def forward(self, spectrogram, *args, **kwargs): res = self.model(spectrogram.transpose(2, 1)).transpose(1, 2) return",
"self.model = nn.Sequential(* [ nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5),",
"from torch.nn import Sequential from hw_asr.base import BaseModel class TCSConv(nn.Module): def __init__(self, in_channels,",
"*args, **kwargs): res = self.model(spectrogram.transpose(2, 1)).transpose(1, 2) return res def transform_input_lengths(self, input_lengths): return",
"tcsconv += [ nn.Conv1d(in_channels, in_channels, kernel_size, padding='same', groups=in_channels, dilation=dilation), nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same',",
"512, 75, 5), TCSConv(512, 512, 87, dilation=2), TCSConv(512, 1024, 1, separable=False), nn.Conv1d(1024, n_class,",
"nn.Sequential(*self.layers) self.res_block = nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same'), nn.BatchNorm1d(out_channels)) self.last_activation = activation() def forward(self,",
"[ nn.Conv1d(in_channels, in_channels, kernel_size, padding='same', groups=in_channels, dilation=dilation), nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same', dilation=dilation) ]",
"return self.last_activation(self.res_block(x) + y) class Quartznet(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats,",
"import BaseModel class TCSConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, activation=nn.ReLU, separable=True): super().__init__()",
"padding=16), ResidualTCSConvBlock(256, 256, 33, 5), TCSConv(256, 256, 1, separable=False), nn.Conv1d(256, n_class, kernel_size=1, padding='same')",
"ResidualTCSConvBlock(256, 256, 39, 5), ResidualTCSConvBlock(256, 512, 51, 5), ResidualTCSConvBlock(512, 512, 63, 5), ResidualTCSConvBlock(512,",
"in_channels, kernel_size, padding='same', groups=in_channels, dilation=dilation), nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same', dilation=dilation) ] else: tcsconv",
"kernel_size, activation=None)) continue self.layers.append(TCSConv(in_channels, in_channels, kernel_size, activation=activation)) self.layers = nn.Sequential(*self.layers) self.res_block = nn.Sequential(nn.Conv1d(in_channels,",
"if i + 1 == num_blocks: self.layers.append(TCSConv(in_channels, out_channels, kernel_size, activation=None)) continue self.layers.append(TCSConv(in_channels, in_channels,",
"activation=nn.ReLU, separable=True): super().__init__() tcsconv = [] if separable: tcsconv += [ nn.Conv1d(in_channels, in_channels,",
"TCSConv(512, 512, 87, dilation=2), TCSConv(512, 1024, 1, separable=False), nn.Conv1d(1024, n_class, kernel_size=1, padding='same') ]",
"// 2 class QuartznetSmall(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class, *args,",
"75, 5), TCSConv(512, 512, 87, dilation=2), TCSConv(512, 1024, 1, separable=False), nn.Conv1d(1024, n_class, kernel_size=1,",
"= nn.Sequential(*tcsconv) def forward(self, x): return self.tcsconv(x) class ResidualTCSConvBlock(nn.Module): def __init__(self, in_channels, out_channels,",
"out_channels, kernel_size, num_blocks, activation=nn.ReLU): super().__init__() self.num_blocks = num_blocks self.layers = [] for i",
"kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), ResidualTCSConvBlock(256, 256, 39, 5), ResidualTCSConvBlock(256, 512,",
"def __init__(self, in_channels, out_channels, kernel_size, dilation=1, activation=nn.ReLU, separable=True): super().__init__() tcsconv = [] if",
"nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same'), nn.BatchNorm1d(out_channels)) self.last_activation = activation() def forward(self, x): y =",
"nn.BatchNorm1d(out_channels)) self.last_activation = activation() def forward(self, x): y = self.layers(x) return self.last_activation(self.res_block(x) +",
"Quartznet(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class, *args, **kwargs) self.model =",
"*args, **kwargs): super().__init__(n_feats, n_class, *args, **kwargs) self.model = nn.Sequential(* [ nn.Conv1d(n_feats, out_channels=256, kernel_size=33,",
"def forward(self, spectrogram, *args, **kwargs): res = self.model(spectrogram.transpose(2, 1)).transpose(1, 2) return res def",
"5), ResidualTCSConvBlock(256, 512, 51, 5), ResidualTCSConvBlock(512, 512, 63, 5), ResidualTCSConvBlock(512, 512, 75, 5),",
"out_channels, kernel_size, padding='same', dilation=dilation) ] tcsconv.append(nn.BatchNorm1d(out_channels)) if activation is not None: tcsconv.append(activation()) self.tcsconv",
"self.layers = nn.Sequential(*self.layers) self.res_block = nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same'), nn.BatchNorm1d(out_channels)) self.last_activation = activation()",
"Sequential from hw_asr.base import BaseModel class TCSConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, dilation=1,",
"n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class, *args, **kwargs) self.model = nn.Sequential(* [ nn.Conv1d(n_feats,",
"[ nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), TCSConv(256, 256, 1,",
"in_channels, out_channels, kernel_size, num_blocks, activation=nn.ReLU): super().__init__() self.num_blocks = num_blocks self.layers = [] for",
"TCSConv(512, 1024, 1, separable=False), nn.Conv1d(1024, n_class, kernel_size=1, padding='same') ] ) def forward(self, spectrogram,",
"return self.tcsconv(x) class ResidualTCSConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, num_blocks, activation=nn.ReLU): super().__init__() self.num_blocks",
"5), ResidualTCSConvBlock(256, 256, 39, 5), ResidualTCSConvBlock(256, 512, 51, 5), ResidualTCSConvBlock(512, 512, 63, 5),",
"nn.Conv1d(256, n_class, kernel_size=1, padding='same') ] ) def forward(self, spectrogram, *args, **kwargs): res =",
"transform_input_lengths(self, input_lengths): return input_lengths // 2 class QuartznetSmall(BaseModel): def __init__(self, n_feats, n_class, *args,",
"= nn.Sequential(*self.layers) self.res_block = nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1, padding='same'), nn.BatchNorm1d(out_channels)) self.last_activation = activation() def",
"def transform_input_lengths(self, input_lengths): return input_lengths // 2 class QuartznetSmall(BaseModel): def __init__(self, n_feats, n_class,",
"256, 1, separable=False), nn.Conv1d(256, n_class, kernel_size=1, padding='same') ] ) def forward(self, spectrogram, *args,",
"self.layers = [] for i in range(num_blocks): if i + 1 == num_blocks:",
"activation() def forward(self, x): y = self.layers(x) return self.last_activation(self.res_block(x) + y) class Quartznet(BaseModel):",
"dilation=1, activation=nn.ReLU, separable=True): super().__init__() tcsconv = [] if separable: tcsconv += [ nn.Conv1d(in_channels,",
"ResidualTCSConvBlock(512, 512, 75, 5), TCSConv(512, 512, 87, dilation=2), TCSConv(512, 1024, 1, separable=False), nn.Conv1d(1024,",
"self.model(spectrogram.transpose(2, 1)).transpose(1, 2) return res def transform_input_lengths(self, input_lengths): return input_lengths // 2 class",
"num_blocks: self.layers.append(TCSConv(in_channels, out_channels, kernel_size, activation=None)) continue self.layers.append(TCSConv(in_channels, in_channels, kernel_size, activation=activation)) self.layers = nn.Sequential(*self.layers)",
"res def transform_input_lengths(self, input_lengths): return input_lengths // 2 class QuartznetSmall(BaseModel): def __init__(self, n_feats,",
"super().__init__() tcsconv = [] if separable: tcsconv += [ nn.Conv1d(in_channels, in_channels, kernel_size, padding='same',",
"self.last_activation = activation() def forward(self, x): y = self.layers(x) return self.last_activation(self.res_block(x) + y)",
"*args, **kwargs) self.model = nn.Sequential(* [ nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256,",
"ResidualTCSConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, num_blocks, activation=nn.ReLU): super().__init__() self.num_blocks = num_blocks self.layers",
"input_lengths // 2 class QuartznetSmall(BaseModel): def __init__(self, n_feats, n_class, *args, **kwargs): super().__init__(n_feats, n_class,",
"nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), TCSConv(256, 256, 1, separable=False),",
"= self.model(spectrogram.transpose(2, 1)).transpose(1, 2) return res def transform_input_lengths(self, input_lengths): return input_lengths // 2",
"not None: tcsconv.append(activation()) self.tcsconv = nn.Sequential(*tcsconv) def forward(self, x): return self.tcsconv(x) class ResidualTCSConvBlock(nn.Module):",
"y = self.layers(x) return self.last_activation(self.res_block(x) + y) class Quartznet(BaseModel): def __init__(self, n_feats, n_class,",
"nn.Sequential(* [ nn.Conv1d(n_feats, out_channels=256, kernel_size=33, stride=2, padding=16), ResidualTCSConvBlock(256, 256, 33, 5), ResidualTCSConvBlock(256, 256,",
"87, dilation=2), TCSConv(512, 1024, 1, separable=False), nn.Conv1d(1024, n_class, kernel_size=1, padding='same') ] ) def",
"33, 5), TCSConv(256, 256, 1, separable=False), nn.Conv1d(256, n_class, kernel_size=1, padding='same') ] ) def",
"1)).transpose(1, 2) return res def transform_input_lengths(self, input_lengths): return input_lengths // 2 class QuartznetSmall(BaseModel):",
"TCSConv(256, 256, 1, separable=False), nn.Conv1d(256, n_class, kernel_size=1, padding='same') ] ) def forward(self, spectrogram,",
"out_channels, kernel_size=1, padding='same', dilation=dilation) ] else: tcsconv += [ nn.Conv1d(in_channels, out_channels, kernel_size, padding='same',",
"512, 87, dilation=2), TCSConv(512, 1024, 1, separable=False), nn.Conv1d(1024, n_class, kernel_size=1, padding='same') ] )",
"__init__(self, in_channels, out_channels, kernel_size, dilation=1, activation=nn.ReLU, separable=True): super().__init__() tcsconv = [] if separable:",
"res = self.model(spectrogram.transpose(2, 1)).transpose(1, 2) return res def transform_input_lengths(self, input_lengths): return input_lengths //"
] |
[
"operations = [ migrations.RemoveField( model_name='record', name='result', ), migrations.AddField( model_name='record', name='classify', field=models.CharField(blank=True, max_length=128), ),",
"Migration(migrations.Migration): dependencies = [ ('app', '0006_auto_20190908_1239'), ] operations = [ migrations.RemoveField( model_name='record', name='result',",
"by Django 2.2 on 2019-09-09 04:07 from django.db import migrations, models class Migration(migrations.Migration):",
"dependencies = [ ('app', '0006_auto_20190908_1239'), ] operations = [ migrations.RemoveField( model_name='record', name='result', ),",
"on 2019-09-09 04:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [",
"from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0006_auto_20190908_1239'), ]",
"django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0006_auto_20190908_1239'), ] operations",
"Generated by Django 2.2 on 2019-09-09 04:07 from django.db import migrations, models class",
"migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0006_auto_20190908_1239'), ] operations = [",
"('app', '0006_auto_20190908_1239'), ] operations = [ migrations.RemoveField( model_name='record', name='result', ), migrations.AddField( model_name='record', name='classify',",
"2.2 on 2019-09-09 04:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies =",
"Django 2.2 on 2019-09-09 04:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies",
"models class Migration(migrations.Migration): dependencies = [ ('app', '0006_auto_20190908_1239'), ] operations = [ migrations.RemoveField(",
"# Generated by Django 2.2 on 2019-09-09 04:07 from django.db import migrations, models",
"'0006_auto_20190908_1239'), ] operations = [ migrations.RemoveField( model_name='record', name='result', ), migrations.AddField( model_name='record', name='classify', field=models.CharField(blank=True,",
"import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0006_auto_20190908_1239'), ] operations =",
"<reponame>LuoBingjun/Pic_demo # Generated by Django 2.2 on 2019-09-09 04:07 from django.db import migrations,",
"= [ migrations.RemoveField( model_name='record', name='result', ), migrations.AddField( model_name='record', name='classify', field=models.CharField(blank=True, max_length=128), ), ]",
"2019-09-09 04:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app',",
"] operations = [ migrations.RemoveField( model_name='record', name='result', ), migrations.AddField( model_name='record', name='classify', field=models.CharField(blank=True, max_length=128),",
"04:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0006_auto_20190908_1239'),",
"= [ ('app', '0006_auto_20190908_1239'), ] operations = [ migrations.RemoveField( model_name='record', name='result', ), migrations.AddField(",
"[ ('app', '0006_auto_20190908_1239'), ] operations = [ migrations.RemoveField( model_name='record', name='result', ), migrations.AddField( model_name='record',",
"class Migration(migrations.Migration): dependencies = [ ('app', '0006_auto_20190908_1239'), ] operations = [ migrations.RemoveField( model_name='record',"
] |
[
"as nn import torch.nn.functional as F class Rnn(nn.Module): def __init__(self): super(Rnn,self).__init__() self.lstm=nn.LSTM(input_size=300,hidden_size=150,bidirectional=True) self.final=nn.Linear(600,8)",
"torch.nn.functional as F class Rnn(nn.Module): def __init__(self): super(Rnn,self).__init__() self.lstm=nn.LSTM(input_size=300,hidden_size=150,bidirectional=True) self.final=nn.Linear(600,8) def forward(self,x): tmp1,tmp2=self.lstm(x)#[500,45,300]",
"as F class Rnn(nn.Module): def __init__(self): super(Rnn,self).__init__() self.lstm=nn.LSTM(input_size=300,hidden_size=150,bidirectional=True) self.final=nn.Linear(600,8) def forward(self,x): tmp1,tmp2=self.lstm(x)#[500,45,300] x=torch.cat((tmp1[0],tmp1[-1]),1)#[45,600]",
"Artificial Intelligence/PA3/src/rnn.py<gh_stars>0 import torch import torch.nn as nn import torch.nn.functional as F class",
"Intelligence/PA3/src/rnn.py<gh_stars>0 import torch import torch.nn as nn import torch.nn.functional as F class Rnn(nn.Module):",
"<reponame>youyl/Programming-Assignments-THU<filename>Introduction to Artificial Intelligence/PA3/src/rnn.py<gh_stars>0 import torch import torch.nn as nn import torch.nn.functional as",
"import torch.nn.functional as F class Rnn(nn.Module): def __init__(self): super(Rnn,self).__init__() self.lstm=nn.LSTM(input_size=300,hidden_size=150,bidirectional=True) self.final=nn.Linear(600,8) def forward(self,x):",
"Rnn(nn.Module): def __init__(self): super(Rnn,self).__init__() self.lstm=nn.LSTM(input_size=300,hidden_size=150,bidirectional=True) self.final=nn.Linear(600,8) def forward(self,x): tmp1,tmp2=self.lstm(x)#[500,45,300] x=torch.cat((tmp1[0],tmp1[-1]),1)#[45,600] output=self.final(x)#[45,8] return output",
"nn import torch.nn.functional as F class Rnn(nn.Module): def __init__(self): super(Rnn,self).__init__() self.lstm=nn.LSTM(input_size=300,hidden_size=150,bidirectional=True) self.final=nn.Linear(600,8) def",
"class Rnn(nn.Module): def __init__(self): super(Rnn,self).__init__() self.lstm=nn.LSTM(input_size=300,hidden_size=150,bidirectional=True) self.final=nn.Linear(600,8) def forward(self,x): tmp1,tmp2=self.lstm(x)#[500,45,300] x=torch.cat((tmp1[0],tmp1[-1]),1)#[45,600] output=self.final(x)#[45,8] return",
"torch import torch.nn as nn import torch.nn.functional as F class Rnn(nn.Module): def __init__(self):",
"import torch import torch.nn as nn import torch.nn.functional as F class Rnn(nn.Module): def",
"F class Rnn(nn.Module): def __init__(self): super(Rnn,self).__init__() self.lstm=nn.LSTM(input_size=300,hidden_size=150,bidirectional=True) self.final=nn.Linear(600,8) def forward(self,x): tmp1,tmp2=self.lstm(x)#[500,45,300] x=torch.cat((tmp1[0],tmp1[-1]),1)#[45,600] output=self.final(x)#[45,8]",
"to Artificial Intelligence/PA3/src/rnn.py<gh_stars>0 import torch import torch.nn as nn import torch.nn.functional as F",
"import torch.nn as nn import torch.nn.functional as F class Rnn(nn.Module): def __init__(self): super(Rnn,self).__init__()",
"torch.nn as nn import torch.nn.functional as F class Rnn(nn.Module): def __init__(self): super(Rnn,self).__init__() self.lstm=nn.LSTM(input_size=300,hidden_size=150,bidirectional=True)"
] |
[
"work_form.volume_number.data != None: volume = rdflib.BNode() self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume)) self.graph.add((volume, SCHEMA.volumeNumber, rdflib.Literal(work_form.volume_number.data))) self.graph.add((volume,",
"# research statements, and FAST subjects self.creative_works = rdflib.Graph() self.research_statements = rdflib.Graph() self.fast_subjects",
"EmailProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') connection = config_manager.conns generated_by = kwargs.get(\"generated_by\")",
"rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) message = \"New {} as {} to Colorado College's Scholarship",
"self.graph.add((work, SCHEMA.title, rdflib.Literal(work_form.book_title.data))) if work_form.isbn.data is not None: self.graph.add((work, SCHEMA.isbn, rdflib.Literal(work_form.isbn.data))) if work_form.editionStatement.data",
"span = soup.new_tag(\"span\") span.string = \"v. {}\".format(vol_number) col_2.append(span) issue_number = citation.get(\"issue_number\") if issue_number",
"em = soup.new_tag(\"em\") em.string = citation.get(\"journal_title\") col_2.append(em) if \"year\" in citation: span =",
"given_name is not None: profile.graph.add( (person_iri, SCHEMA.givenName.rdflib, rdflib.Literal(given_name, lang=\"en\"))) family_name = form.get(\"family_name\") if",
"not None: profile.graph.add( (person_iri, SCHEMA.givenName.rdflib, rdflib.Literal(given_name, lang=\"en\"))) family_name = form.get(\"family_name\") if family_name is",
"MIMEText import mimetypes import click import rdflib import requests from bs4 import BeautifulSoup",
"len(work_form.iri.data) > 0: work_iri = rdflib.URIRef(work_form.iri.data) else: # Mint IRI for new work",
"git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: self.scholarship_repo.update_file(file_path,",
"rdflib.Literal(email))) add_qualified_generation(profile.graph, person_iri, generated_by) dept_year = kwargs.get(\"year-iri\") if dept_year is not None: dept_year_iri",
"issue is not None: issue_number = work_graph.value(subject=issue, predicate=SCHEMA.issueNumber) result = connection.datastore.query(\"\"\"SELECT ?issue WHERE",
"rdflib.Literal(work_form.page_end.data))) journal = rdflib.BNode() self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical)) self.graph.add((journal, SCHEMA.name, rdflib.Literal(work_form.journal_title.data))) issue, volume =",
"kwargs.get(\"generated_by\") work_form = kwargs.get(\"work_form\") BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema sparql = EMAIL_LOOKUP.format(",
"person_iri = rdflib.URIRef(form.get(\"iri\")) profile = EmailProfile(config_manager, person_iri) msg = \"\" results = connection.datastore.query(",
"def __add_book__(self, work, work_form): self.graph.add((work, rdflib.RDF.type, SCHEMA.Book)) self.graph.add((work, SCHEMA.title, rdflib.Literal(work_form.book_title.data))) if work_form.isbn.data is",
"\"\\n{}:\\t{}\".format(row, field.data) self.__send_email__(\"Added New Work\", email_body) return work_iri def new(self, message): \"\"\"Adds a",
"based on form values\"\"\" config_manager = kwargs.get('config_manager') connection = config_manager.conns BF = config_manager.nsm.bf",
"work Args: work_iri(rdflib.URIRef): Creative Work IRI for Article work_form(Flask.request.form): Dict of form values",
"IRI for new work if \"doi\" in work_form and len(work_form.doi.data) > 0: work_iri",
"self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter)) book_bnode = rdflib.BNode() self.graph.add((work_iri, SCHEMA.partOf, book_bnode)) self.__add_book__(book_bnode, work_form) elif citation_type.startswith(\"book\"):",
"row in form_subjects: fast_id, fast_label = row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri = fast_id else:",
"!= None: volume = rdflib.BNode() self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume)) self.graph.add((volume, SCHEMA.volumeNumber, rdflib.Literal(work_form.volume_number.data))) self.graph.add((volume, SCHEMA.partOf,",
"email to administrators with attached profile graph\"\"\" message = MIMEMultipart() message[\"From\"] = self.email.get(\"user\")",
"self.email.get(\"host\"), self.email.get(\"port\")) email_server.ehlo() if self.email.get(\"tls\"): email_server.starttls() body = MIMEText(body, _charset=\"UTF-8\") message.attach(body) graph_turtle =",
"soup.new_tag(\"a\", href=citation.get(\"url\")) work_link.string = name col_2.append(work_link) else: span = soup.new_tag(\"span\") span.string = name",
"def delete_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user = kwargs.get(\"current_user\") config_manager =",
"recipients]) if len(carbon_copy) > 0: message[\"Cc\"] = ','.join(carbon_copy) recipients.extend(carbon_copy) body = MIMEText(text, _charset=\"UTF-8\")",
"config_manager.conns revised_by = kwargs.get(\"revised_by\") raw_citation = kwargs.get(\"citation\") work_type = kwargs.get(\"work_type\", \"article\") if revised_by",
"SCHEMA.author, self.person_iri)) elif generated_by: self.person_iri = generated_by self.graph.add((work_iri, SCHEMA.author, generated_by)) if \"url\" in",
"schema:name ?label . } \"\"\"): entity, label = row break volume, issue =",
"file_path=self.current_year_path, graph_name=\"current_year\", message=\"{} person to Department for school year\".format(action)) self.__save_graph__( git_repo=self.scholarship_repo, file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\",",
"= kwargs.get(\"person\") self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.profile = kwargs.get(\"profile\") self.scholarship_repo =",
"= rdflib.URIRef(work_form.author.data) self.graph.add((work_iri, SCHEMA.author, self.person_iri)) elif generated_by: self.person_iri = generated_by self.graph.add((work_iri, SCHEMA.author, generated_by))",
"iri_subject)) profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) profile.update(msg) return {\"message\":",
"= rdflib.URIRef(periodical) bibcat.replace_iri(work_graph, entity, new_work) entity = new_work if volume is not None:",
"SCHEMA.pageStart, rdflib.Literal(work_form.page_start.data))) if work_form.page_end.data !=None: self.graph.add((work_iri, SCHEMA.pageEnd, rdflib.Literal(work_form.page_end.data))) journal = rdflib.BNode() self.graph.add((journal, rdflib.RDF.type,",
"= datetime.datetime.utcnow() if now.month < 7: start_year = now.year - 1 end_year =",
"citation_type = work_form.citation_type.data self.graph.add((work_iri, CITE.citationType, rdflib.Literal(citation_type))) if \"author\" in work_form and len(work_form.author.data) >",
"len(issue_number ) > 0: span = soup.new_tag(\"span\") span.string = \" no. {}\".format(issue_number) col_2.append(span)",
"= \"- {}.\" else: page_string = \"{}.\" span.string = page_string.format(page_end) col_2.append(span) div.append(col_2) col_3",
"if \"page_start\" in citation: page_string = \"- {}.\" else: page_string = \"{}.\" span.string",
"self.graph.add((work_iri, SCHEMA.partOf, issue)) elif volume is not None: self.graph.add((work_iri, SCHEMA.partOf, volume)) else: #",
"hashlib.sha1( self.fast_subjects.serialize(format='n3')).hexdigest() def __get_content__(self, repo_name, content): raw_turtle = None try: raw_turtle = content.decoded_content",
"= kwargs.get(\"graph_name\") graph = getattr(self, graph_name) message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) git_graph =",
"= rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri,",
"research statements and fast subjects self.research_statements.add((existing_stmt, SCHEMA.about, subject)) self.fast_subjects.add((subject, rdflib.RDF.type, BF.Topic)) subject_label =",
"author = kwargs.get(\"author\") connection = config_manager.conns iri = kwargs.get(\"iri\") __email_work__( config=config, carbon_copy=[current_user.data.get('mail'),], subject=\"Delete",
"[]) message = MIMEMultipart() message[\"From\"] = sender message[\"Subject\"] = subject message[\"To\"] = \",\".join([\"<{0}>\".format(r)",
"self.graph.add( (work, SCHEMA.editionStatement, rdflib.Literal(work_form.editionStatement.data))) if work_form.editor.data is not None: self.graph.add((work, SCHEMA.editor, rdflib.Literal(work_form.editor.data))) if",
"def __update_fast_subjects__(self): existing_subjects, new_subjects = set(), set() existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) for",
"subject in list(new_subjects.difference(existing_subjects)): # Add new subject to research statements and fast subjects",
"graph_name=\"fast_subjects\", message=\"Fast subject added\") class EmailProfile(object): \"\"\"Simple Email Profile class that creates a",
"self.person_iri = generated_by self.graph.add((work_iri, SCHEMA.author, generated_by)) if \"url\" in work_form and len(work_form.url.data) >",
"SCHEMA.name, rdflib.Literal(work_form.journal_title.data))) issue, volume = None, None if work_form.volume_number.data != None: volume =",
"self.profile.graph.value( subject=existing_stmt, predicate=SCHEMA.description) if new_description is not None \\ and str(current_description) != str(new_description):",
"Updated Creative Work\") return {\"message\": \"New work has been submitted for review\", \"status\":",
"SCHEMA.Periodical)) self.graph.add((journal, SCHEMA.name, rdflib.Literal(work_form.journal_title.data))) issue, volume = None, None if work_form.volume_number.data != None:",
"self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) for row in self.research_statements.objects( subject=existing_stmt, predicate=SCHEMA.about): existing_subjects.add(row) for fast_heading in",
"\"status\": True} def edit_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user_email = kwargs.get(\"current_user_email\")",
"= GitProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') author = kwargs.get(\"author\") connection =",
"?volume) BIND(<{1}> as ?periodical) BIND(\"{2}\" as ?issue_number) }}\"\"\".format(volume, periodical, issue_number) ) if result",
"changes to {}'s academic profile:\\n\".format( generated_by, form['label']) statement_iri_results = connection.datastore.query( RESEARCH_STMT_IRI.format( person_iri)) if",
"now.year - 1 end_year = now.year else: start_year = now.year end_year = now.year",
"= subject email_server = smtplib.SMTP( self.email.get(\"host\"), self.email.get(\"port\")) email_server.ehlo() if self.email.get(\"tls\"): email_server.starttls() body =",
"\"status\": True, \"iri\": work_iri} def add_profile(**kwargs): \"\"\"Adds a profile stub to scholarship graph\"\"\"",
"= kwargs.get('config_manager') connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema form =",
"# Quick dedup server.sendmail(sender, recipients, message.as_string()) server.close() def generate_citation_html(citation): soup = BeautifulSoup(\"\", 'lxml')",
"carbon_copy=[current_user.data.get('mail'),], subject=\"Delete Request\", text=\"Delete citation {} for {}\\nrequested by {} on {}\".format( iri,",
"len(page_start) > 0: span = soup.new_tag(\"span\") span.string = \"p. {}\".format(page_start) col_2.append(span) page_end =",
"blob = self.scholarship_repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle,",
"list(set(self.recipients)) # Quick dedup email_server.sendmail(self.email.get(\"user\"), recipients, message.as_string()) email_server.close() def __add_article__(self, work_iri, work_form): \"\"\"Article",
"config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema form = kwargs.get('form') current_user = kwargs.get(\"current_user\")",
"includes logins and administor \"\"\" work_graph = kwargs.get(\"graph\") config = kwargs.get(\"config\") sender =",
"work if \"doi\" in work_form and len(work_form.doi.data) > 0: work_iri = rdflib.URIRef(work_form.doi.data) else:",
"= form.get(\"orcid\") else: person_uri = \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()) person_iri = rdflib.URIRef(person_uri) if generated_by is",
"to {}'s academic profile:\\n\".format( generated_by, form['label']) statement_iri_results = connection.datastore.query( RESEARCH_STMT_IRI.format( person_iri)) if len(statement_iri_results)",
"git_profile = GitProfile(config) current_user_email = kwargs.get(\"current_user_email\") config_manager = kwargs.get('config_manager') connection = config_manager.conns revised_by",
"\"btn btn-danger\", \"onclick\": delete_click, \"type=\": \"input\"}) delete_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-trash-alt\"})) col_3.append(delete_a) div.append(col_3) return",
"for r in recipients]) if len(carbon_copy) > 0: message[\"Cc\"] = ','.join(carbon_copy) recipients.extend(carbon_copy) body",
"import current_app from github import Github, GithubException import utilities from .sparql import EMAIL_LOOKUP,",
"from .sparql import add_qualified_generation, add_qualified_revision BF = rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\") CITE = rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\") PROV =",
"work_iri def add(self, work_form, generated_by=None): work_iri = self.__populate_work__(work_form, generated_by) email_body = \"Properties and",
"rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {}\".format(label), lang=\"en\"))) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib,",
"carbon_copy = kwargs.get(\"carbon_copy\", []) message = MIMEMultipart() message[\"From\"] = sender message[\"Subject\"] = subject",
"git_repo=self.tiger_repo, file_path=\"/KnowledgeGraph/cc-people.ttl\", graph_name=\"cc_people\", message=\"{} {} to CC People\".format(action, person_label)) self.__save_graph__( git_repo=self.tiger_repo, file_path=self.current_year_path, graph_name=\"current_year\",",
"= config_manager.conns iri = kwargs.get(\"iri\") __email_work__( config=config, carbon_copy=[current_user.data.get('mail'),], subject=\"Delete Request\", text=\"Delete citation {}",
"and save to datastore\"\"\" config = kwargs.get(\"config\") profile = EmailProfile(config) current_user = kwargs.get(\"current_user\")",
"CITE.authorString, rdflib.Literal(work_form.author_string.data))) if generated_by: add_qualified_generation(self.graph, work_iri, generated_by) citation_type = work_form.citation_type.data self.graph.add((work_iri, CITE.citationType, rdflib.Literal(citation_type)))",
"work Args: form(Flask.request.form): Dict of form values \"\"\" if len(work_form.iri.data) > 0: work_iri",
"attached profile graph\"\"\" message = MIMEMultipart() message[\"From\"] = self.email.get(\"user\") message[\"To\"] = \",\".join([\"<{0}>\".format(r) for",
"profile\"\"\" global BACKGROUND_THREAD BACKGROUND_THREAD = ProfileUpdateThread( config=self.config, msg=message, person=self.person_iri, profile=self) BACKGROUND_THREAD.start() self.__send_email__(\"Updating Profile\",",
"rdflib.URIRef(citation.iri), revised_by) email_subject = 'Edited Creative Work {}'.format(citation.iri) __email_work__(graph=temp_work, config=config, carbon_copy=[current_user_email,], subject=email_subject, text=\"Edited",
"BeautifulSoup from flask import current_app from github import Github, GithubException import utilities from",
"rdflib.Graph() self.fast_subjects = rdflib.Graph() self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): raw_turtle =",
"hashlib import io import os import pprint import smtplib import subprocess import threading",
"volume, new_volume) if issue is not None: issue_number = work_graph.value(subject=issue, predicate=SCHEMA.issueNumber) result =",
"= result[0].get(\"periodical\").get(\"value\") if periodical != str(entity): new_work = rdflib.URIRef(periodical) bibcat.replace_iri(work_graph, entity, new_work) entity",
"now.month < 7: start_year = now.year - 1 end_year = now.year else: start_year",
"is not None \\ and str(current_description) != str(new_description): self.research_statements.remove((existing_stmt, SCHEMA.description, current_description)) self.research_statements.replace('\"','\\\"') self.research_statements.add((existing_stmt,",
"academic year # and CC people now = datetime.datetime.utcnow() if now.month < 7:",
"production. Keyword args: work_graph(rdflib.Graph): RDF Graph of Citation config: Configuration includes logins and",
"self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) current_description = self.research_statements.value( subject=existing_stmt, predicate=SCHEMA.description) new_description = self.profile.graph.value( subject=existing_stmt, predicate=SCHEMA.description)",
"\"doi\" in work_form and len(work_form.doi.data) > 0: work_iri = rdflib.URIRef(work_form.doi.data) else: work_iri =",
"and parsing latest RDF for current academic year # and CC people now",
"file_path = kwargs.get(\"file_path\") graph_name = kwargs.get(\"graph_name\") branch = kwargs.get(\"branch\") message = kwargs.get(\"message\", \"Updating",
"the following changes to {}'s academic profile:\\n\".format( generated_by, form['label']) statement_iri_results = connection.datastore.query( RESEARCH_STMT_IRI.format(",
"rdflib.RDF.type, SCHEMA.Book)) self.graph.add((work, SCHEMA.title, rdflib.Literal(work_form.book_title.data))) if work_form.isbn.data is not None: self.graph.add((work, SCHEMA.isbn, rdflib.Literal(work_form.isbn.data)))",
"work_form) elif citation_type.startswith(\"book\"): self.__add_book__(work_iri, work_form) else: abort(500) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about,",
"BACKGROUND_THREAD.start() self.__send_email__(\"Updating Profile\", message) def __email_work__(**kwargs): \"\"\"Function takes a work graph and configuration",
"def update_all(self, person_label, action=\"Add\", connection=None): self.__save_graph__( git_repo=self.tiger_repo, file_path=\"/KnowledgeGraph/cc-people.ttl\", graph_name=\"cc_people\", message=\"{} {} to CC",
"self.graph.add((work_iri, SCHEMA.dataPublished, rdflib.Literal(work_form.datePublished.data))) self.graph.add((work_iri, CITE.authorString, rdflib.Literal(work_form.author_string.data))) if generated_by: add_qualified_generation(self.graph, work_iri, generated_by) citation_type =",
"if \"journal_title\" in citation: em = soup.new_tag(\"em\") em.string = citation.get(\"journal_title\") col_2.append(em) if \"year\"",
"content self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') def",
"rdflib.URIRef(title), person_iri)) statement = kwargs.get(\"statement\", form.get(\"research_stmt\")) if statement is not None: statement_iri =",
"href=citation.get(\"url\")) work_link.string = name col_2.append(work_link) else: span = soup.new_tag(\"span\") span.string = name col_2.append(span)",
"now.year else: start_year = now.year end_year = now.year + 1 self.current_year_path = \"/KnowledgeGraph/cc-{0}-{1}.ttl\".format(",
"SCHEMA.about, subject)) for subject in list(new_subjects.difference(existing_subjects)): # Add new subject to research statements",
"FILTER(CONTAINS(?name, \"{0}\")) }}\"\"\".format(label)) if result and len(result) > 0: periodical = result[0].get(\"periodical\").get(\"value\") if",
"\",\".join([\"<{0}>\".format(r) for r in recipients]) if len(carbon_copy) > 0: message[\"Cc\"] = ','.join(carbon_copy) recipients.extend(carbon_copy)",
"GithubException: repo = getattr(self, repo_name) blob = repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) return raw_turtle",
"commits any changes to self.__update_fast_subjects__() self.__update_research_statements__() self.__save_graph__( file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=self.statement_msg) self.__save_graph__( file_path =\"/data/cc-fast-subjects.ttl\",",
"if result and len(result) > 0: new_volume = rdflib.URIRef(result[0].get(\"volume\").get(\"value\")) bibcat.replace_iri(work_graph, volume, new_volume) if",
"work_graph.value(predicate=SCHEMA.partOf, object=entity) schema_class = work_graph.value(subject=volume_or_issue, predicate=rdflib.RDF.type) if schema_class is SCHEMA.volumeNumber: volume = volume_or_issue",
"= list(set(self.recipients)) # Quick dedup email_server.sendmail(self.email.get(\"user\"), recipients, message.as_string()) email_server.close() def __add_article__(self, work_iri, work_form):",
"is not None: statement_iri = rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri,",
"citation.populate_book() citation.add_book() if revised_by: add_qualified_revision(temp_work, rdflib.URIRef(citation.iri), revised_by) email_subject = 'Edited Creative Work {}'.format(citation.iri)",
"flask import current_app from github import Github, GithubException import utilities from .sparql import",
"git_repo=self.tiger_repo, file_path=self.current_year_path, graph_name=\"current_year\", message=\"{} person to Department for school year\".format(action)) self.__save_graph__( git_repo=self.scholarship_repo, file_path=\"/data/cc-research-statements.ttl\",",
"RDF Graph of Citation config: Configuration includes logins and administor \"\"\" work_graph =",
"SCHEMA.isbn, rdflib.Literal(work_form.isbn.data))) if work_form.editionStatement.data is not None: self.graph.add( (work, SCHEMA.editionStatement, rdflib.Literal(work_form.editionStatement.data))) if work_form.editor.data",
"page_start = citation.get(\"page_start\") if page_start and len(page_start) > 0: span = soup.new_tag(\"span\") span.string",
"message=self.statement_msg) self.__save_graph__( file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") class EmailProfile(object): \"\"\"Simple Email Profile",
"soup.new_tag(\"span\") span.string = name col_2.append(span) if \"journal_title\" in citation: em = soup.new_tag(\"em\") em.string",
"connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema results = connection.datastore.query( EMAIL_LOOKUP.format(",
"= set(), set() existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) for row in self.research_statements.objects( subject=existing_stmt,",
"subject message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in recipients]) if len(carbon_copy) > 0: message[\"Cc\"]",
"self.__populate_work__(work_form, generated_by) email_body = \"Properties and Values for Creative Work {}\".format(work_iri) for row",
"work_form): self.graph.add((work, rdflib.RDF.type, SCHEMA.Book)) self.graph.add((work, SCHEMA.title, rdflib.Literal(work_form.book_title.data))) if work_form.isbn.data is not None: self.graph.add((work,",
"utilities.Article_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_article() citation.add_article() elif work_type.startswith(\"book\"): citation = utilities.Book_Citation(raw_citation, temp_work,",
"rdflib.RDF.type, SCHEMA.PublicationVolume)) self.graph.add((volume, SCHEMA.volumeNumber, rdflib.Literal(work_form.volume_number.data))) self.graph.add((volume, SCHEMA.partOf, journal)) if work_form.issue_number.data != None: issue",
"generated_by: self.person_iri = generated_by self.graph.add((work_iri, SCHEMA.author, generated_by)) if \"url\" in work_form and len(work_form.url.data)",
"work_form) elif citation_type.startswith(\"book chapter\"): self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter)) book_bnode = rdflib.BNode() self.graph.add((work_iri, SCHEMA.partOf, book_bnode))",
"io.StringIO( work_graph.serialize(format='turtle').decode()) attachment = MIMEText(work_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='work.ttl') message.attach(attachment) #try: server = smtplib.SMTP(config.get('EMAIL')['host'],",
"cc_github.get_organization(\"Tutt-Library\") self.statement_msg = kwargs.get(\"msg\") self.person_iri = kwargs.get(\"person\") self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph()",
"\"col-7\"}) if \"article_title\" in citation: name = citation.get(\"article_title\") elif \"title\" in citation: name",
"> 0: span = soup.new_tag(\"span\") span.string = \" no. {}\".format(issue_number) col_2.append(span) page_start =",
"= subject message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in recipients]) if len(carbon_copy) > 0:",
"rdf:type schema:Periodical ; schema:name ?label . } \"\"\"): entity, label = row break",
"len(statement_iri_results) > 0: statement_iri = rdflib.URIRef( statement_iri_results[0].get(\"iri\").get(\"value\")) add_qualified_revision(profile.graph, statement_iri, generated_by) else: statement_iri =",
"\"/KnowledgeGraph/cc-{0}-{1}.ttl\".format( start_year, end_year) self.current_year = rdflib.Graph() self.cc_people = rdflib.Graph() self.tiger_repo = self.tutt_github.get_repo(\"tiger-catalog\") for",
"Graph of Citation config: Configuration includes logins and administor \"\"\" work_graph = kwargs.get(\"graph\")",
"(work, SCHEMA.description, rdflib.Literal(work_form.notes.data))) def __populate_work__(self, work_form, generated_by=None): \"\"\"Populates graph with new work Args:",
"citation: name = citation.get(\"article_title\") elif \"title\" in citation: name = citation.get(\"title\") if \"url\"",
"added\") if connection: self.__reload_triplestore__(connection) def __reload_triplestore__(self, config_mgr): data_upload = [] for row in",
"form.get(\"given_name\") if given_name is not None: profile.graph.add( (person_iri, SCHEMA.givenName.rdflib, rdflib.Literal(given_name, lang=\"en\"))) family_name =",
"SCHEMA.PublicationVolume)) self.graph.add((volume, SCHEMA.volumeNumber, rdflib.Literal(work_form.volume_number.data))) self.graph.add((volume, SCHEMA.partOf, journal)) if work_form.issue_number.data != None: issue =",
"generated_by, form['label']) statement_iri_results = connection.datastore.query( RESEARCH_STMT_IRI.format( person_iri)) if len(statement_iri_results) > 0: statement_iri =",
"kwargs.get(\"branch\") message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) graph = getattr(self, graph_name) graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest()",
"= hashlib.sha1(graph.serialize(format='n3')).hexdigest() if graph_sha1 == self.graph_hashes[graph_name]: return git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch:",
"via email to the Administrators for review.\"\"\" def __init__(self, config, person_iri): self.config =",
".sparql import add_qualified_generation, add_qualified_revision BF = rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\") CITE = rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\") PROV = rdflib.Namespace(\"http://www.w3.org/ns/prov#\")",
"if len(statement) > 0: profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) form_subjects = form.getlist(\"subjects\") new_subjects",
"?issue WHERE {{ ?issue rdf:type schema:issueNumber ; schema:issueNumber ?issue_number . OPTIONAL {{ ?issue",
"rdflib.URIRef(dept_year_iri) title = kwargs.get(\"title-iri\") profile.graph.add( (dept_year_iri, rdflib.URIRef(title), person_iri)) statement = kwargs.get(\"statement\", form.get(\"research_stmt\")) if",
"= soup.new_tag(\"span\") span.string = \" no. {}\".format(issue_number) col_2.append(span) page_start = citation.get(\"page_start\") if page_start",
"citation: name = citation.get(\"title\") if \"url\" in citation: work_link = soup.new_tag(\"a\", href=citation.get(\"url\")) work_link.string",
"{}'.format(citation.iri) __email_work__(graph=temp_work, config=config, carbon_copy=[current_user_email,], subject=email_subject, text=\"Edited {} revised by {} on {}, see",
"= kwargs.get(\"graph\") config = kwargs.get(\"config\") sender = config.get('EMAIL')['user'] recipients = config.get(\"ADMINS\") subject =",
"issue = None, None volume_or_issue = work_graph.value(predicate=SCHEMA.partOf, object=entity) schema_class = work_graph.value(subject=volume_or_issue, predicate=rdflib.RDF.type) if",
"subject_label)) def __update_research_statements__(self): existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) current_description = self.research_statements.value( subject=existing_stmt, predicate=SCHEMA.description)",
"if fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) new_subjects[fast_uri] = fast_label for",
"New Work\", email_body) return work_iri def new(self, message): \"\"\"Adds a new profile\"\"\" self.__send_email__(\"Add",
"= \"({0})\".format(citation.get(\"year\")) col_2.append(span) vol_number = citation.get(\"volume_number\") if vol_number and len(vol_number) > 0: span",
"kwargs.get(\"author\") connection = config_manager.conns iri = kwargs.get(\"iri\") __email_work__( config=config, carbon_copy=[current_user.data.get('mail'),], subject=\"Delete Request\", text=\"Delete",
"email_body = \"Properties and Values for Creative Work {}\".format(work_iri) for row in work_form._fields:",
"FAST subjects self.creative_works = rdflib.Graph() self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.scholarship_repo =",
"self.graph.add((work_iri, CITE.citationType, rdflib.Literal(citation_type))) if \"author\" in work_form and len(work_form.author.data) > 0: self.person_iri =",
". } \"\"\"): entity, label = row break volume, issue = None, None",
"span.string = name col_2.append(span) if \"journal_title\" in citation: em = soup.new_tag(\"em\") em.string =",
"delete_click, \"type=\": \"input\"}) delete_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-trash-alt\"})) col_3.append(delete_a) div.append(col_3) return div.prettify() def __reconcile_article__(work_graph,",
"fast_label in new_subjects.items(): iri_subject = rdflib.URIRef(fast_subject) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) existing_label = profile.fast_subjects.value(",
"volume is not None: self.graph.add((work_iri, SCHEMA.partOf, volume)) else: # Add work_iri to Journal",
"self.research_statements.remove((existing_stmt, SCHEMA.about, subject)) for subject in list(new_subjects.difference(existing_subjects)): # Add new subject to research",
"col_3 = soup.new_tag(\"div\", **{\"class\": \"col-4\"}) iri = citation.get(\"iri\") if iri: edit_click = \"editCitation('{}');\".format(iri)",
"generated_by = person_iri msg = \"{} made the following changes to {}'s academic",
"SCHEMA.about, rdflib.Literal(work_form.abstract.data))) return work_iri def add(self, work_form, generated_by=None): work_iri = self.__populate_work__(work_form, generated_by) email_body",
"Article work_form(Flask.request.form): Dict of form values \"\"\" self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.ScholarlyArticle)) self.graph.add((work_iri, SCHEMA.name, rdflib.Literal(work_form.article_title.data)))",
"name = citation.get(\"title\") if \"url\" in citation: work_link = soup.new_tag(\"a\", href=citation.get(\"url\")) work_link.string =",
"import click import rdflib import requests from bs4 import BeautifulSoup from flask import",
"rdf:type schema:issueNumber ; schema:issueNumber ?issue_number . OPTIONAL {{ ?issue schema:partOf ?volume . }}",
"work graph and configuration and emails the graph in turtle format to the",
"\"{} {}\".format(given_name, family_name) profile.graph.add((person_iri, rdflib.RDFS.label, rdflib.Literal(label, lang=\"en\"))) email = form.get(\"email\") profile.graph.add((person_iri, SCHEMA.email.rdflib, rdflib.Literal(email)))",
"= hashlib.sha1( self.fast_subjects.serialize(format='n3')).hexdigest() def __get_content__(self, repo_name, content): raw_turtle = None try: raw_turtle =",
"dept_year_iri = rdflib.URIRef(dept_year_iri) title = kwargs.get(\"title-iri\") profile.graph.add( (dept_year_iri, rdflib.URIRef(title), person_iri)) statement = kwargs.get(\"statement\",",
"Creative Work {}\".format(work_iri) for row in work_form._fields: if row.startswith(\"csrf_token\"): continue field = getattr(work_form,",
"new_issue = rdflib.URIRef(result[0].get(\"issue\").get(\"value\")) bibcat.replace_iri(work_graph, issue, new_issue) def add_creative_work(**kwargs): \"\"\"Calls utilities to populate and",
"form_subjects = form.getlist(\"subjects\") new_subjects = {} for row in form_subjects: fast_id, fast_label =",
"schema:partOf ?periodical . }} BIND(<{0}> as ?volume) BIND(<{1}> as ?periodical) BIND(\"{2}\" as ?issue_number)",
"= rdflib.Graph() self.graph.namespace_manager.bind(\"bf\", BF) self.graph.namespace_manager.bind(\"cite\", CITE) self.graph.namespace_manager.bind(\"schema\", SCHEMA) self.graph.namespace_manager.bind(\"prov\", PROV) self.email = config.get(\"EMAIL\")",
"1 self.current_year_path = \"/KnowledgeGraph/cc-{0}-{1}.ttl\".format( start_year, end_year) self.current_year = rdflib.Graph() self.cc_people = rdflib.Graph() self.tiger_repo",
"self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"creative-works\"): self.creative_works_git = content self.creative_works.parse( data=raw_turtle,",
"statement is not None: statement_iri = rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add(",
"self.profile = kwargs.get(\"profile\") self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): try: raw_turtle =",
"\"onclick\": delete_click, \"type=\": \"input\"}) delete_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-trash-alt\"})) col_3.append(delete_a) div.append(col_3) return div.prettify() def",
"BF = rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\") CITE = rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\") PROV = rdflib.Namespace(\"http://www.w3.org/ns/prov#\") SCHEMA = rdflib.Namespace(\"http://schema.org/\") class",
"rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) self.graph.add((work_iri, SCHEMA.dataPublished, rdflib.Literal(work_form.datePublished.data))) self.graph.add((work_iri, CITE.authorString, rdflib.Literal(work_form.author_string.data))) if generated_by: add_qualified_generation(self.graph, work_iri,",
"vol_num)) if result and len(result) > 0: new_volume = rdflib.URIRef(result[0].get(\"volume\").get(\"value\")) bibcat.replace_iri(work_graph, volume, new_volume)",
"work_link.string = name col_2.append(work_link) else: span = soup.new_tag(\"span\") span.string = name col_2.append(span) if",
"= rdflib.URIRef(result[0].get(\"volume\").get(\"value\")) bibcat.replace_iri(work_graph, volume, new_volume) if issue is not None: issue_number = work_graph.value(subject=issue,",
"CITE = rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\") PROV = rdflib.Namespace(\"http://www.w3.org/ns/prov#\") SCHEMA = rdflib.Namespace(\"http://schema.org/\") class GitProfile(object): def __init__(self,",
"Args: work_iri(rdflib.URIRef): Creative Work IRI for Article work_form(Flask.request.form): Dict of form values \"\"\"",
"self.cc_people.serialize(format='n3')).hexdigest() self.graph_hashes[\"current_year\"] = hashlib.sha1( self.current_year.serialize(format='n3')).hexdigest() # Start retrieving and parsing latest RDF for",
"as ?work) BIND(\"{1}\" as ?volumeNumber) }}\"\"\".format(entity, vol_num)) if result and len(result) > 0:",
"= '' person_iri = rdflib.URIRef(form.get(\"iri\")) profile = EmailProfile(config_manager, person_iri) msg = \"\" results",
"else: page_string = \"{}.\" span.string = page_string.format(page_end) col_2.append(span) div.append(col_2) col_3 = soup.new_tag(\"div\", **{\"class\":",
"for new work if \"doi\" in work_form and len(work_form.doi.data) > 0: work_iri =",
"SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {} {}\".format(",
"from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import mimetypes import click import",
"start_year, end_year) self.current_year = rdflib.Graph() self.cc_people = rdflib.Graph() self.tiger_repo = self.tutt_github.get_repo(\"tiger-catalog\") for content",
"subject)) self.fast_subjects.add((subject, rdflib.RDF.type, BF.Topic)) subject_label = self.profile.graph.value(subject=subject, predicate=rdflib.RDFS.label) if subject_label is not None:",
"= \"{}.\" span.string = page_string.format(page_end) col_2.append(span) div.append(col_2) col_3 = soup.new_tag(\"div\", **{\"class\": \"col-4\"}) iri",
"format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') def __save_graph__(self, **kwargs): file_path",
"None: self.graph.add((work, SCHEMA.editor, rdflib.Literal(work_form.editor.data))) if work_form.provisionActivityStatement.data is not None: self.graph.add( (work, SCHEMA.provisionActivityStatement, rdflib.Literal(work_form.provisionActivityStatement.data)))",
"\"\"\"Adds a new profile\"\"\" self.__send_email__(\"Add new profile\", message) def update(self, message): \"\"\"Edits existing",
"= config.get(\"TRIPLESTORE_URL\") self.tutt_github = cc_github.get_organization(\"Tutt-Library\") # Start retrieving and parsing latest RDF for",
"{}\".format(work_iri) for row in work_form._fields: if row.startswith(\"csrf_token\"): continue field = getattr(work_form, row) email_body",
"= self.__get_content__(\"tiger_repo\", content) if content.name.startswith(self.current_year_path.split(\"/\")[-1]): self.current_year_git = content self.current_year.parse(data=raw_turtle, format='turtle') if content.name.startswith(\"cc-people\"): self.cc_people_git",
"message=\"Fast subject added\") class EmailProfile(object): \"\"\"Simple Email Profile class that creates a local",
"import io import os import pprint import smtplib import subprocess import threading import",
"{}\".format( action, person_label)) self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") self.__save_graph__( git_repo=self.scholarship_repo,",
"None: vol_num = work_graph.value(subject=volume, predicate=SCHEMA.volumeNumber) result = connection.datastore.query(\"\"\"SELECT ?volume WHERE {{ ?volume schema:partOf",
"name col_2.append(span) if \"journal_title\" in citation: em = soup.new_tag(\"em\") em.string = citation.get(\"journal_title\") col_2.append(em)",
"soup = BeautifulSoup(\"\", 'lxml') div = soup.new_tag(\"div\", **{\"class\": \"row\"}) col_1 = soup.new_tag(\"div\", **{\"class\":",
"else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) iri_subject = rdflib.URIRef(fast_uri) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) profile.graph.add( (iri_subject,",
"bs4 import BeautifulSoup from flask import current_app from github import Github, GithubException import",
"os import pprint import smtplib import subprocess import threading import uuid from email.mime.multipart",
"issue_number = citation.get(\"issue_number\") if issue_number and len(issue_number ) > 0: span = soup.new_tag(\"span\")",
"end_year = now.year else: start_year = now.year end_year = now.year + 1 self.current_year_path",
"CITE.month, rdflib.Literal(work_form.month.data))) def __add_book__(self, work, work_form): self.graph.add((work, rdflib.RDF.type, SCHEMA.Book)) self.graph.add((work, SCHEMA.title, rdflib.Literal(work_form.book_title.data))) if",
"self.__add_article__(work_iri, work_form) elif citation_type.startswith(\"book chapter\"): self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter)) book_bnode = rdflib.BNode() self.graph.add((work_iri, SCHEMA.partOf,",
"creative works, # research statements, and FAST subjects self.creative_works = rdflib.Graph() self.research_statements =",
"rdflib.BNode() self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue)) self.graph.add((issue, SCHEMA.issueNumber, rdflib.Literal(work_form.issue_number.data))) if volume is not None: self.graph.add((issue,",
"repo_name) blob = repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) return raw_turtle def __save_graph__(self, **kwargs): git_repo",
"file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=\"{} Research Statement for {}\".format( action, person_label)) self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/cc-fast-subjects.ttl\",",
"fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) iri_subject = rdflib.URIRef(fast_uri) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) profile.graph.add( (iri_subject, rdflib.RDF.type,",
"Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.triplestore_url = config.get(\"TRIPLESTORE_URL\") self.tutt_github = cc_github.get_organization(\"Tutt-Library\") # Start retrieving and parsing",
"result and len(result) > 0: new_volume = rdflib.URIRef(result[0].get(\"volume\").get(\"value\")) bibcat.replace_iri(work_graph, volume, new_volume) if issue",
"research statements, and FAST subjects self.creative_works = rdflib.Graph() self.research_statements = rdflib.Graph() self.fast_subjects =",
"= config_manager.conns generated_by = kwargs.get(\"generated_by\") work_form = kwargs.get(\"work_form\") BF = config_manager.nsm.bf SCHEMA =",
"and len(result) > 0: new_volume = rdflib.URIRef(result[0].get(\"volume\").get(\"value\")) bibcat.replace_iri(work_graph, volume, new_volume) if issue is",
"= self.__get_content__(\"scholarship_repo\", content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"):",
"col_3.append(delete_a) div.append(col_3) return div.prettify() def __reconcile_article__(work_graph, connection): SCHEMA = rdflib.Namespace(\"http://schema.org/\") for row in",
"bibcat.replace_iri(work_graph, entity, new_work) entity = new_work if volume is not None: vol_num =",
"person_uri = \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()) person_iri = rdflib.URIRef(person_uri) if generated_by is None: generated_by =",
"git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def update_all(self, person_label,",
"kwargs.get(\"file_path\") branch = kwargs.get(\"branch\") graph_name = kwargs.get(\"graph_name\") graph = getattr(self, graph_name) message =",
"SCHEMA.description, new_description)) def run(self): # Function iterates and commits any changes to self.__update_fast_subjects__()",
"self.email.get(\"port\")) email_server.ehlo() if self.email.get(\"tls\"): email_server.starttls() body = MIMEText(body, _charset=\"UTF-8\") message.attach(body) graph_turtle = io.StringIO(",
"= getattr(work_form, row) email_body += \"\\n{}:\\t{}\".format(row, field.data) self.__send_email__(\"Added New Work\", email_body) return work_iri",
"{} for row in form_subjects: fast_id, fast_label = row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri =",
"new subject to research statements and fast subjects self.research_statements.add((existing_stmt, SCHEMA.about, subject)) self.fast_subjects.add((subject, rdflib.RDF.type,",
"email.mime.text import MIMEText import mimetypes import click import rdflib import requests from bs4",
"rdflib.Literal(work_form.isbn.data))) if work_form.editionStatement.data is not None: self.graph.add( (work, SCHEMA.editionStatement, rdflib.Literal(work_form.editionStatement.data))) if work_form.editor.data is",
"= config_manager.nsm.schema sparql = EMAIL_LOOKUP.format( current_user.data.get('mail').lower()) email_results = connection.datastore.query(sparql) if len(email_results) > 0:",
"self.fast_subjects.parse( data=raw_turtle, format='turtle') def __save_graph__(self, **kwargs): file_path = kwargs.get(\"file_path\") branch = kwargs.get(\"branch\") graph_name",
"update_profile(**kwargs): \"\"\"Updates existing triples based on form values\"\"\" config_manager = kwargs.get('config_manager') connection =",
"rdflib.URIRef(work_form.author.data) self.graph.add((work_iri, SCHEMA.author, self.person_iri)) elif generated_by: self.person_iri = generated_by self.graph.add((work_iri, SCHEMA.author, generated_by)) if",
"soup.new_tag(\"em\") em.string = citation.get(\"journal_title\") col_2.append(em) if \"year\" in citation: span = soup.new_tag(\"span\") span.string",
"work_graph.serialize(format='turtle').decode()) attachment = MIMEText(work_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='work.ttl') message.attach(attachment) #try: server = smtplib.SMTP(config.get('EMAIL')['host'], config.get('EMAIL')['port'])",
"work_form._fields: if row.startswith(\"csrf_token\"): continue field = getattr(work_form, row) email_body += \"\\n{}:\\t{}\".format(row, field.data) self.__send_email__(\"Added",
"raw_turtle = content.decoded_content except GithubException: blob = self.scholarship_repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) if content.name.startswith(\"cc-research-statements\"):",
"!= None: issue = rdflib.BNode() self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue)) self.graph.add((issue, SCHEMA.issueNumber, rdflib.Literal(work_form.issue_number.data))) if volume",
"in citation: em = soup.new_tag(\"em\") em.string = citation.get(\"journal_title\") col_2.append(em) if \"year\" in citation:",
"if len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = None form =",
"graph_name = kwargs.get(\"graph_name\") branch = kwargs.get(\"branch\") message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) graph =",
"else: generated_by = None form = kwargs.get(\"form\") if form.get(\"orcid\"): person_uri = form.get(\"orcid\") else:",
"self.triplestore_url = self.config.get(\"TRIPLESTORE_URL\") self.graph = rdflib.Graph() self.graph.namespace_manager.bind(\"bf\", BF) self.graph.namespace_manager.bind(\"cite\", CITE) self.graph.namespace_manager.bind(\"schema\", SCHEMA) self.graph.namespace_manager.bind(\"prov\",",
"rdflib.Literal(work_form.journal_title.data))) issue, volume = None, None if work_form.volume_number.data != None: volume = rdflib.BNode()",
"**{\"class\": \"col-7\"}) if \"article_title\" in citation: name = citation.get(\"article_title\") elif \"title\" in citation:",
"config.get(\"EMAIL\") self.recipients = config.get(\"ADMINS\") self.person_iri = person_iri def __send_email__(self, subject, body): \"\"\"Sends email",
"rdflib.URIRef(form.get(\"iri\")) profile = EmailProfile(config_manager, person_iri) msg = \"\" results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower()))",
"= getattr(self, graph_name) graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest() if graph_sha1 == self.graph_hashes[graph_name]: return git_graph =",
"administor \"\"\" work_graph = kwargs.get(\"graph\") config = kwargs.get(\"config\") sender = config.get('EMAIL')['user'] recipients =",
"profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) add_qualified_generation(profile.graph, statement_iri, generated_by) form_subjects = form.getlist(\"subjects\") new_subjects =",
"citation = utilities.Article_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_article() citation.add_article() elif work_type.startswith(\"book\"): citation =",
"person_iri) msg = \"\" results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0:",
"raw_turtle = base64.b64decode(blob.content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"):",
"import base64 import bibcat import datetime import hashlib import io import os import",
"self.config.get(\"TRIPLESTORE_URL\") self.graph = rdflib.Graph() self.graph.namespace_manager.bind(\"bf\", BF) self.graph.namespace_manager.bind(\"cite\", CITE) self.graph.namespace_manager.bind(\"schema\", SCHEMA) self.graph.namespace_manager.bind(\"prov\", PROV) self.email",
"= \",\".join([\"<{0}>\".format(r) for r in recipients]) if len(carbon_copy) > 0: message[\"Cc\"] = ','.join(carbon_copy)",
"= soup.new_tag(\"div\", **{\"class\": \"col-4\"}) iri = citation.get(\"iri\") if iri: edit_click = \"editCitation('{}');\".format(iri) delete_click",
"recipients = list(set(self.recipients)) # Quick dedup email_server.sendmail(self.email.get(\"user\"), recipients, message.as_string()) email_server.close() def __add_article__(self, work_iri,",
"\"\"\"Function takes a work graph and configuration and emails the graph in turtle",
"author, current_user.data.get('mail'), datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Deletion of {} for {} under review\".format(",
"to work has been submitted for review\", \"status\": True} def update_profile(**kwargs): \"\"\"Updates existing",
"config = kwargs.get(\"config\") profile = EmailProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') connection",
"content.name.startswith(\"cc-people\"): self.cc_people_git = content self.cc_people.parse(data=raw_turtle, format='turtle') self.graph_hashes[\"cc_people\"] = hashlib.sha1( self.cc_people.serialize(format='n3')).hexdigest() self.graph_hashes[\"current_year\"] = hashlib.sha1(",
"len(result) > 0: periodical = result[0].get(\"periodical\").get(\"value\") if periodical != str(entity): new_work = rdflib.URIRef(periodical)",
"entity, label = row break volume, issue = None, None volume_or_issue = work_graph.value(predicate=SCHEMA.partOf,",
"existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) for row in self.research_statements.objects( subject=existing_stmt, predicate=SCHEMA.about): existing_subjects.add(row) for",
"retrieving and parsing latest RDF for creative works, # research statements, and FAST",
"recipients.extend(carbon_copy) body = MIMEText(text, _charset=\"UTF-8\") message.attach(body) if work_graph: work_turtle = io.StringIO( work_graph.serialize(format='turtle').decode()) attachment",
"\"Properties and Values for Creative Work {}\".format(work_iri) for row in work_form._fields: if row.startswith(\"csrf_token\"):",
"attachment = MIMEText(work_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='work.ttl') message.attach(attachment) #try: server = smtplib.SMTP(config.get('EMAIL')['host'], config.get('EMAIL')['port']) server.ehlo()",
"= kwargs.get(\"file_path\") branch = kwargs.get(\"branch\") graph_name = kwargs.get(\"graph_name\") graph = getattr(self, graph_name) message",
"revised_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) temp_work = rdflib.Graph() temp_work.namespace_manager.bind(\"cite\", rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")) for prefix, namespace in",
"bibcat.replace_iri(work_graph, issue, new_issue) def add_creative_work(**kwargs): \"\"\"Calls utilities to populate and save to datastore\"\"\"",
"rdflib.Namespace(\"http://schema.org/\") class GitProfile(object): def __init__(self, config): self.graph_hashes = {} cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\"))",
"existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) current_description = self.research_statements.value( subject=existing_stmt, predicate=SCHEMA.description) new_description = self.profile.graph.value(",
"getattr(self, graph_name) graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest() if graph_sha1 == self.graph_hashes[graph_name]: return git_graph = getattr(self,",
"= citation.get(\"page_end\") if page_end and len(page_end) > 0: span = soup.new_tag(\"span\") if \"page_start\"",
") if result and len(result) > 0: new_issue = rdflib.URIRef(result[0].get(\"issue\").get(\"value\")) bibcat.replace_iri(work_graph, issue, new_issue)",
"rdflib.Namespace(\"http://schema.org/\") for row in work_graph.query( \"\"\"SELECT ?entity ?label WHERE { ?entity rdf:type schema:Periodical",
"content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git =",
"administrators for review before adding to production. Keyword args: work_graph(rdflib.Graph): RDF Graph of",
"RDF graph for new profile or editing profile that is send via email",
"if \"url\" in work_form and len(work_form.url.data) > 0: self.graph.add((work_iri, SCHEMA.url, rdflib.URIRef(work_form.url.data))) if work_form.abstract.data",
"self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') def __save_graph__(self,",
"config.get(\"ADMINS\") self.person_iri = person_iri def __send_email__(self, subject, body): \"\"\"Sends email to administrators with",
"new profile or editing profile that is send via email to the Administrators",
"content self.current_year.parse(data=raw_turtle, format='turtle') if content.name.startswith(\"cc-people\"): self.cc_people_git = content self.cc_people.parse(data=raw_turtle, format='turtle') self.graph_hashes[\"cc_people\"] = hashlib.sha1(",
"self.__send_email__(\"Updating Profile\", message) def __email_work__(**kwargs): \"\"\"Function takes a work graph and configuration and",
"span = soup.new_tag(\"span\") span.string = name col_2.append(span) if \"journal_title\" in citation: em =",
"= citation.get(\"journal_title\") col_2.append(em) if \"year\" in citation: span = soup.new_tag(\"span\") span.string = \"({0})\".format(citation.get(\"year\"))",
"= kwargs.get(\"work_type\", \"article\") if revised_by is None and current_user_email: sparql = EMAIL_LOOKUP.format( current_user_email.lower())",
"current_user_email: sparql = EMAIL_LOOKUP.format( current_user_email.lower()) email_results = connection.datastore.query(sparql) if len(email_results) > 0: revised_by",
"text=\"Edited {} revised by {} on {}, see attached RDF turtle file\".format( citation.citation_type,",
"iri_subject)) existing_label = profile.fast_subjects.value( subject=iri_subject, predicate=rdflib.RDFS.label) if existing_label is None: profile.graph.add( (iri_subject, rdflib.RDF.type,",
"for {} {}\".format( form.get('given_name'), form.get('family_name')), lang=\"en\"))) add_qualified_generation( profile.graph, statement_iri, generated_by) citations = form.getlist(\"citations\")",
"return {\"message\": \"New work has been submitted for review\", \"status\": True, \"iri\": work_iri}",
"= smtplib.SMTP(config.get('EMAIL')['host'], config.get('EMAIL')['port']) server.ehlo() if config.get('EMAIL')['tls']: server.starttls() server.ehlo() server.login(sender, config.get(\"EMAIL\")[\"password\"]) recipients = list(set(recipients))",
"= kwargs.get(\"iri\") __email_work__( config=config, carbon_copy=[current_user.data.get('mail'),], subject=\"Delete Request\", text=\"Delete citation {} for {}\\nrequested by",
"email = form.get(\"email\") profile.graph.add((person_iri, SCHEMA.email.rdflib, rdflib.Literal(email))) add_qualified_generation(profile.graph, person_iri, generated_by) dept_year = kwargs.get(\"year-iri\") if",
"schema:volumeNumber ?volumeNumber . BIND(<{0}> as ?work) BIND(\"{1}\" as ?volumeNumber) }}\"\"\".format(entity, vol_num)) if result",
"col_2.append(work_link) else: span = soup.new_tag(\"span\") span.string = name col_2.append(span) if \"journal_title\" in citation:",
"# Pull in the latest changes in each repository for directory in data_upload:",
"{{ ?issue schema:partOf ?volume . }} OPTIONAL {{ ?issue schema:partOf ?periodical . }}",
"= kwargs.get(\"config\") current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') profile = EmailProfile(config) connection =",
"namespace in git_profile.cc_people.namespaces(): temp_work.namespace_manager.bind(prefix, namespace) if work_type.startswith(\"article\"): citation = utilities.Article_Citation(raw_citation, temp_work, git_profile.cc_people, False)",
"Quick dedup server.sendmail(sender, recipients, message.as_string()) server.close() def generate_citation_html(citation): soup = BeautifulSoup(\"\", 'lxml') div",
"rdflib import requests from bs4 import BeautifulSoup from flask import current_app from github",
"and commits any changes to self.__update_fast_subjects__() self.__update_research_statements__() self.__save_graph__( file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=self.statement_msg) self.__save_graph__( file_path",
"Creative Work IRI for Article work_form(Flask.request.form): Dict of form values \"\"\" self.graph.add((work_iri, rdflib.RDF.type,",
"= volume_or_issue result = connection.datastore.query(\"\"\"SELECT ?periodical WHERE {{ ?periodical schema:name ?name . FILTER(CONTAINS(?name,",
"graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def __update_fast_subjects__(self): existing_subjects, new_subjects =",
"SCHEMA.about, subject)) self.fast_subjects.add((subject, rdflib.RDF.type, BF.Topic)) subject_label = self.profile.graph.value(subject=subject, predicate=rdflib.RDFS.label) if subject_label is not",
"update_all(self, person_label, action=\"Add\", connection=None): self.__save_graph__( git_repo=self.tiger_repo, file_path=\"/KnowledgeGraph/cc-people.ttl\", graph_name=\"cc_people\", message=\"{} {} to CC People\".format(action,",
"self.graph.add((work, rdflib.RDF.type, SCHEMA.Book)) self.graph.add((work, SCHEMA.title, rdflib.Literal(work_form.book_title.data))) if work_form.isbn.data is not None: self.graph.add((work, SCHEMA.isbn,",
"new profile\"\"\" self.__send_email__(\"Add new profile\", message) def update(self, message): \"\"\"Edits existing profile\"\"\" global",
"graph_turtle = io.StringIO( self.graph.serialize(format='turtle').decode()) attachment = MIMEText(graph_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='profile.ttl') message.attach(attachment) email_server.login( self.email.get(\"user\"),",
"edit_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user_email = kwargs.get(\"current_user_email\") config_manager = kwargs.get('config_manager')",
"repo_name, content): raw_turtle = None try: raw_turtle = content.decoded_content except GithubException: repo =",
"= kwargs.get(\"generated_by\") work_form = kwargs.get(\"work_form\") BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema sparql =",
"cc_github.get_organization(\"Tutt-Library\") # Start retrieving and parsing latest RDF for current academic year #",
"# Start retrieving and parsing latest RDF for current academic year # and",
"if work_form.page_start.data !=None: self.graph.add((work_iri, SCHEMA.pageStart, rdflib.Literal(work_form.page_start.data))) if work_form.page_end.data !=None: self.graph.add((work_iri, SCHEMA.pageEnd, rdflib.Literal(work_form.page_end.data))) journal",
"= \",\".join([\"<{0}>\".format(r) for r in self.recipients]) message[\"Subject\"] = subject email_server = smtplib.SMTP( self.email.get(\"host\"),",
"person_iri, generated_by) dept_year = kwargs.get(\"year-iri\") if dept_year is not None: dept_year_iri = rdflib.URIRef(dept_year_iri)",
"for {}\\nrequested by {} on {}\".format( iri, author, current_user.data.get('mail'), datetime.datetime.utcnow().isoformat()) ) return {\"message\":",
"creates a local RDF graph for new profile or editing profile that is",
"update(self, message): \"\"\"Edits existing profile\"\"\" global BACKGROUND_THREAD BACKGROUND_THREAD = ProfileUpdateThread( config=self.config, msg=message, person=self.person_iri,",
"rdflib.BNode() self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume)) self.graph.add((volume, SCHEMA.volumeNumber, rdflib.Literal(work_form.volume_number.data))) self.graph.add((volume, SCHEMA.partOf, journal)) if work_form.issue_number.data !=",
"{} to CC People\".format(action, person_label)) self.__save_graph__( git_repo=self.tiger_repo, file_path=self.current_year_path, graph_name=\"current_year\", message=\"{} person to Department",
"= rdflib.Graph() self.cc_people = rdflib.Graph() self.tiger_repo = self.tutt_github.get_repo(\"tiger-catalog\") for content in self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"): raw_turtle",
"self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/creative-works.ttl\", graph_name=\"creative_works\",",
"col_2.append(span) vol_number = citation.get(\"volume_number\") if vol_number and len(vol_number) > 0: span = soup.new_tag(\"span\")",
"= base64.b64decode(blob.content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git",
"if subject_label is not None: self.fast_subjects.add((subject, rdflib.RDFS.label, subject_label)) def __update_research_statements__(self): existing_stmt = self.research_statements.value(",
"self.research_statements.add((existing_stmt, SCHEMA.description, new_description)) def run(self): # Function iterates and commits any changes to",
"?name . FILTER(CONTAINS(?name, \"{0}\")) }}\"\"\".format(label)) if result and len(result) > 0: periodical =",
"= kwargs.get(\"branch\") message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) graph = getattr(self, graph_name) graph_sha1 =",
"result[0].get(\"periodical\").get(\"value\") if periodical != str(entity): new_work = rdflib.URIRef(periodical) bibcat.replace_iri(work_graph, entity, new_work) entity =",
"if connection: self.__reload_triplestore__(connection) def __reload_triplestore__(self, config_mgr): data_upload = [] for row in config_mgr.get(\"CONNECTIONS\"):",
"values \"\"\" if len(work_form.iri.data) > 0: work_iri = rdflib.URIRef(work_form.iri.data) else: # Mint IRI",
"{{ ?periodical schema:name ?name . FILTER(CONTAINS(?name, \"{0}\")) }}\"\"\".format(label)) if result and len(result) >",
"= None try: raw_turtle = content.decoded_content except GithubException: repo = getattr(self, repo_name) blob",
"kwargs.get(\"file_path\") graph_name = kwargs.get(\"graph_name\") branch = kwargs.get(\"branch\") message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) graph",
"new_description = self.profile.graph.value( subject=existing_stmt, predicate=SCHEMA.description) if new_description is not None \\ and str(current_description)",
"message = \"New {} as {} to Colorado College's Scholarship Graph\".format( label, person_iri)",
"EmailProfile(config_manager, person_iri) msg = \"\" results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) >",
"= kwargs.get(\"config\") git_profile = GitProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') author =",
"= self.config.get(\"TRIPLESTORE_URL\") self.graph = rdflib.Graph() self.graph.namespace_manager.bind(\"bf\", BF) self.graph.namespace_manager.bind(\"cite\", CITE) self.graph.namespace_manager.bind(\"schema\", SCHEMA) self.graph.namespace_manager.bind(\"prov\", PROV)",
"work_form.editionStatement.data is not None: self.graph.add( (work, SCHEMA.editionStatement, rdflib.Literal(work_form.editionStatement.data))) if work_form.editor.data is not None:",
"volume is not None: vol_num = work_graph.value(subject=volume, predicate=SCHEMA.volumeNumber) result = connection.datastore.query(\"\"\"SELECT ?volume WHERE",
"= kwargs.get(\"year-iri\") if dept_year is not None: dept_year_iri = rdflib.URIRef(dept_year_iri) title = kwargs.get(\"title-iri\")",
"\\ and str(current_description) != str(new_description): self.research_statements.remove((existing_stmt, SCHEMA.description, current_description)) self.research_statements.replace('\"','\\\"') self.research_statements.add((existing_stmt, SCHEMA.description, new_description)) def",
"Journal as last resort self.graph.add((work_iri, SCHEMA.partOf, journal)) if work_form.month.data != None: self.graph.add((work_iri, CITE.month,",
"profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {}\".format(label), lang=\"en\"))) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add(",
"None: profile.graph.add((person_iri, SCHEMA.familyName.rdflib, rdflib.Literal(family_name, lang=\"en\"))) label = \"{} {}\".format(given_name, family_name) profile.graph.add((person_iri, rdflib.RDFS.label, rdflib.Literal(label,",
"= form.get(\"email\") profile.graph.add((person_iri, SCHEMA.email.rdflib, rdflib.Literal(email))) add_qualified_generation(profile.graph, person_iri, generated_by) dept_year = kwargs.get(\"year-iri\") if dept_year",
"values \"\"\" self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.ScholarlyArticle)) self.graph.add((work_iri, SCHEMA.name, rdflib.Literal(work_form.article_title.data))) if work_form.page_start.data !=None: self.graph.add((work_iri, SCHEMA.pageStart,",
"None if work_form.volume_number.data != None: volume = rdflib.BNode() self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume)) self.graph.add((volume, SCHEMA.volumeNumber,",
"current_user.data.get('mail').lower())) if len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = person_iri msg",
"= work_graph.value(predicate=SCHEMA.partOf, object=volume) elif schema_class is SCHEMA.issueNumber: issue = volume_or_issue result = connection.datastore.query(\"\"\"SELECT",
"delete_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-trash-alt\"})) col_3.append(delete_a) div.append(col_3) return div.prettify() def __reconcile_article__(work_graph, connection): SCHEMA =",
"form.get('family_name')), lang=\"en\"))) add_qualified_generation( profile.graph, statement_iri, generated_by) citations = form.getlist(\"citations\") for uri in citations:",
"def generate_citation_html(citation): soup = BeautifulSoup(\"\", 'lxml') div = soup.new_tag(\"div\", **{\"class\": \"row\"}) col_1 =",
"= fast_label for fast_subject, fast_label in new_subjects.items(): iri_subject = rdflib.URIRef(fast_subject) profile.graph.add( (statement_iri, SCHEMA.about.rdflib,",
"action=\"Add\", connection=None): self.__save_graph__( git_repo=self.tiger_repo, file_path=\"/KnowledgeGraph/cc-people.ttl\", graph_name=\"cc_people\", message=\"{} {} to CC People\".format(action, person_label)) self.__save_graph__(",
"= citation.get(\"volume_number\") if vol_number and len(vol_number) > 0: span = soup.new_tag(\"span\") span.string =",
"\"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research",
"specific data added to creative work Args: work_iri(rdflib.URIRef): Creative Work IRI for Article",
"vol_number = citation.get(\"volume_number\") if vol_number and len(vol_number) > 0: span = soup.new_tag(\"span\") span.string",
"fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) iri_subject = rdflib.URIRef(fast_uri) profile.graph.add( (statement_iri,",
") return {\"message\": \"Changes to work has been submitted for review\", \"status\": True}",
"hashlib.sha1( self.current_year.serialize(format='n3')).hexdigest() # Start retrieving and parsing latest RDF for creative works, #",
"= self.profile.graph.value( subject=existing_stmt, predicate=SCHEMA.description) if new_description is not None \\ and str(current_description) !=",
"in citation: page_string = \"- {}.\" else: page_string = \"{}.\" span.string = page_string.format(page_end)",
"= now.year + 1 self.current_year_path = \"/KnowledgeGraph/cc-{0}-{1}.ttl\".format( start_year, end_year) self.current_year = rdflib.Graph() self.cc_people",
"rdflib.Literal(work_form.article_title.data))) if work_form.page_start.data !=None: self.graph.add((work_iri, SCHEMA.pageStart, rdflib.Literal(work_form.page_start.data))) if work_form.page_end.data !=None: self.graph.add((work_iri, SCHEMA.pageEnd, rdflib.Literal(work_form.page_end.data)))",
"= self.scholarship_repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle')",
"if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) return work_iri def add(self, work_form, generated_by=None):",
"None try: raw_turtle = content.decoded_content except GithubException: repo = getattr(self, repo_name) blob =",
"kwargs.get('config_manager') author = kwargs.get(\"author\") connection = config_manager.conns iri = kwargs.get(\"iri\") __email_work__( config=config, carbon_copy=[current_user.data.get('mail'),],",
"> 0: message[\"Cc\"] = ','.join(carbon_copy) recipients.extend(carbon_copy) body = MIMEText(text, _charset=\"UTF-8\") message.attach(body) if work_graph:",
"form.get('given_name'), form.get('family_name')), lang=\"en\"))) add_qualified_generation( profile.graph, statement_iri, generated_by) citations = form.getlist(\"citations\") for uri in",
"subject=email_subject, text=\"Edited {} revised by {} on {}, see attached RDF turtle file\".format(",
"subject=existing_stmt, predicate=SCHEMA.description) new_description = self.profile.graph.value( subject=existing_stmt, predicate=SCHEMA.description) if new_description is not None \\",
"add_qualified_revision(temp_work, rdflib.URIRef(citation.iri), revised_by) email_subject = 'Edited Creative Work {}'.format(citation.iri) __email_work__(graph=temp_work, config=config, carbon_copy=[current_user_email,], subject=email_subject,",
"subject email_server = smtplib.SMTP( self.email.get(\"host\"), self.email.get(\"port\")) email_server.ehlo() if self.email.get(\"tls\"): email_server.starttls() body = MIMEText(body,",
"changes in each repository for directory in data_upload: os.chdir(directory) result = subprocess.run(['git', 'pull',",
"if work_form.notes.data is not None: self.graph.add( (work, SCHEMA.description, rdflib.Literal(work_form.notes.data))) def __populate_work__(self, work_form, generated_by=None):",
"EMAIL_LOOKUP.format( current_user.data.get('mail').lower()) email_results = connection.datastore.query(sparql) if len(email_results) > 0: generated_by = rdflib.URIRef( email_results[0].get(\"person\").get('value'))",
". BIND(<{0}> as ?work) BIND(\"{1}\" as ?volumeNumber) }}\"\"\".format(entity, vol_num)) if result and len(result)",
"> 0: span = soup.new_tag(\"span\") if \"page_start\" in citation: page_string = \"- {}.\"",
"len(vol_number) > 0: span = soup.new_tag(\"span\") span.string = \"v. {}\".format(vol_number) col_2.append(span) issue_number =",
"= \"p. {}\".format(page_start) col_2.append(span) page_end = citation.get(\"page_end\") if page_end and len(page_end) > 0:",
"r in recipients]) if len(carbon_copy) > 0: message[\"Cc\"] = ','.join(carbon_copy) recipients.extend(carbon_copy) body =",
"message=\"Creative Works added\") if connection: self.__reload_triplestore__(connection) def __reload_triplestore__(self, config_mgr): data_upload = [] for",
"(statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {} {}\".format( form.get('given_name'), form.get('family_name')),",
"self.fast_subjects.add((subject, rdflib.RDF.type, BF.Topic)) subject_label = self.profile.graph.value(subject=subject, predicate=rdflib.RDFS.label) if subject_label is not None: self.fast_subjects.add((subject,",
"content self.fast_subjects.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"creative-works\"): self.creative_works_git = content self.creative_works.parse( data=raw_turtle, format='turtle') self.graph_hashes[\"creative_works\"]",
"volume = volume_or_issue issue = work_graph.value(predicate=SCHEMA.partOf, object=volume) elif schema_class is SCHEMA.issueNumber: issue =",
"for row in work_graph.query( \"\"\"SELECT ?entity ?label WHERE { ?entity rdf:type schema:Periodical ;",
"Graph\".format( label, person_iri) profile.new(message) def delete_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user",
"WHERE {{ ?volume schema:partOf ?work ; schema:volumeNumber ?volumeNumber . BIND(<{0}> as ?work) BIND(\"{1}\"",
"citation.get(\"page_start\") if page_start and len(page_start) > 0: span = soup.new_tag(\"span\") span.string = \"p.",
"config = kwargs.get(\"config\") cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.tutt_github = cc_github.get_organization(\"Tutt-Library\") self.statement_msg = kwargs.get(\"msg\")",
"to Colorado College's Scholarship Graph\".format( label, person_iri) profile.new(message) def delete_creative_work(**kwargs): config = kwargs.get(\"config\")",
"**{\"class\": \"row\"}) col_1 = soup.new_tag(\"div\", **{\"class\": \"col-1\"}) citation_type = citation.get(\"ENTRYTYPE\") if citation_type.startswith(\"article\"): col_1.append(soup.new_tag(\"i\",",
"= soup.new_tag(\"span\") span.string = \"p. {}\".format(page_start) col_2.append(span) page_end = citation.get(\"page_end\") if page_end and",
"7: start_year = now.year - 1 end_year = now.year else: start_year = now.year",
"message): \"\"\"Adds a new profile\"\"\" self.__send_email__(\"Add new profile\", message) def update(self, message): \"\"\"Edits",
"Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.tutt_github = cc_github.get_organization(\"Tutt-Library\") self.statement_msg = kwargs.get(\"msg\") self.person_iri = kwargs.get(\"person\") self.research_statements =",
"else: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def update_all(self, person_label, action=\"Add\", connection=None): self.__save_graph__( git_repo=self.tiger_repo, file_path=\"/KnowledgeGraph/cc-people.ttl\",",
"col_3.append(edit_a) delete_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-danger\", \"onclick\": delete_click, \"type=\": \"input\"}) delete_a.append(soup.new_tag(\"i\", **{\"class\":",
"datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Deletion of {} for {} under review\".format( iri, author),",
"{\"message\": \"Deletion of {} for {} under review\".format( iri, author), \"status\": True} def",
"dedup server.sendmail(sender, recipients, message.as_string()) server.close() def generate_citation_html(citation): soup = BeautifulSoup(\"\", 'lxml') div =",
"in row.get(\"data_upload\"): data_upload.append(directory_row[1]) # Pull in the latest changes in each repository for",
"issue = rdflib.BNode() self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue)) self.graph.add((issue, SCHEMA.issueNumber, rdflib.Literal(work_form.issue_number.data))) if volume is not",
"def __save_graph__(self, **kwargs): file_path = kwargs.get(\"file_path\") branch = kwargs.get(\"branch\") graph_name = kwargs.get(\"graph_name\") graph",
"= EmailProfile(config_manager, person_iri) msg = \"\" results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results)",
"= config.get(\"ADMINS\") self.person_iri = person_iri def __send_email__(self, subject, body): \"\"\"Sends email to administrators",
"new work if \"doi\" in work_form and len(work_form.doi.data) > 0: work_iri = rdflib.URIRef(work_form.doi.data)",
"triples based on form values\"\"\" config_manager = kwargs.get('config_manager') connection = config_manager.conns BF =",
"> 0: work_iri = rdflib.URIRef(work_form.doi.data) else: work_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) self.graph.add((work_iri, SCHEMA.dataPublished,",
"= rdflib.URIRef(person_uri) if generated_by is None: generated_by = person_iri profile.graph.add( (person_iri, rdflib.RDF.type, BF.Person.rdflib))",
"datetime import hashlib import io import os import pprint import smtplib import subprocess",
"+ 1 self.current_year_path = \"/KnowledgeGraph/cc-{0}-{1}.ttl\".format( start_year, end_year) self.current_year = rdflib.Graph() self.cc_people = rdflib.Graph()",
"vol_number and len(vol_number) > 0: span = soup.new_tag(\"span\") span.string = \"v. {}\".format(vol_number) col_2.append(span)",
"Request\", text=\"Delete citation {} for {}\\nrequested by {} on {}\".format( iri, author, current_user.data.get('mail'),",
"git_repo=self.scholarship_repo, file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/creative-works.ttl\", graph_name=\"creative_works\", message=\"Creative",
"been submitted for review\", \"status\": True, \"iri\": work_iri} def add_profile(**kwargs): \"\"\"Adds a profile",
"= new_work if volume is not None: vol_num = work_graph.value(subject=volume, predicate=SCHEMA.volumeNumber) result =",
"getattr(self, \"{}_git\".format(graph_name)) if branch: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: git_repo.update_file(file_path, message, graph.serialize(format='turtle'),",
"existing triples based on form values\"\"\" config_manager = kwargs.get('config_manager') connection = config_manager.conns BF",
"if len(email_results) > 0: generated_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) work_iri = rdflib.URIRef(profile.add(work_form, generated_by)) #profile.update(\"Added",
"connection=None): self.__save_graph__( git_repo=self.tiger_repo, file_path=\"/KnowledgeGraph/cc-people.ttl\", graph_name=\"cc_people\", message=\"{} {} to CC People\".format(action, person_label)) self.__save_graph__( git_repo=self.tiger_repo,",
"if work_form.isbn.data is not None: self.graph.add((work, SCHEMA.isbn, rdflib.Literal(work_form.isbn.data))) if work_form.editionStatement.data is not None:",
"config.get('EMAIL')['port']) server.ehlo() if config.get('EMAIL')['tls']: server.starttls() server.ehlo() server.login(sender, config.get(\"EMAIL\")[\"password\"]) recipients = list(set(recipients)) # Quick",
"self.graph.add((work_iri, SCHEMA.pageEnd, rdflib.Literal(work_form.page_end.data))) journal = rdflib.BNode() self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical)) self.graph.add((journal, SCHEMA.name, rdflib.Literal(work_form.journal_title.data))) issue,",
"'attachment', filename='profile.ttl') message.attach(attachment) email_server.login( self.email.get(\"user\"), self.email.get(\"password\")) recipients = list(set(self.recipients)) # Quick dedup email_server.sendmail(self.email.get(\"user\"),",
"config): self.graph_hashes = {} cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.triplestore_url = config.get(\"TRIPLESTORE_URL\") self.tutt_github =",
"file_path =\"/data/creative-works.ttl\", graph_name=\"creative_works\", message=\"Creative Works added\") if connection: self.__reload_triplestore__(connection) def __reload_triplestore__(self, config_mgr): data_upload",
"profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) profile.update(msg) return {\"message\": msg,",
"col_2.append(span) page_end = citation.get(\"page_end\") if page_end and len(page_end) > 0: span = soup.new_tag(\"span\")",
"= rdflib.URIRef( email_results[0].get(\"person\").get('value')) work_iri = rdflib.URIRef(profile.add(work_form, generated_by)) #profile.update(\"Added or Updated Creative Work\") return",
"generated_by self.graph.add((work_iri, SCHEMA.author, generated_by)) if \"url\" in work_form and len(work_form.url.data) > 0: self.graph.add((work_iri,",
"in self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"): raw_turtle = self.__get_content__(\"tiger_repo\", content) if content.name.startswith(self.current_year_path.split(\"/\")[-1]): self.current_year_git = content self.current_year.parse(data=raw_turtle, format='turtle')",
"SCHEMA.author, generated_by)) if \"url\" in work_form and len(work_form.url.data) > 0: self.graph.add((work_iri, SCHEMA.url, rdflib.URIRef(work_form.url.data)))",
"raw_turtle = content.decoded_content except GithubException: repo = getattr(self, repo_name) blob = repo.get_git_blob(content.sha) raw_turtle",
"config_manager = kwargs.get('config_manager') profile = EmailProfile(config) connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA",
"output = '' person_iri = rdflib.URIRef(form.get(\"iri\")) profile = EmailProfile(config_manager, person_iri) msg = \"\"",
"content in self.scholarship_repo.get_dir_contents(\"/data/\"): raw_turtle = self.__get_content__(\"scholarship_repo\", content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse(",
"getattr(self, \"{}_git\".format(graph_name)) if branch: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'),",
"work_turtle = io.StringIO( work_graph.serialize(format='turtle').decode()) attachment = MIMEText(work_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='work.ttl') message.attach(attachment) #try: server",
"\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()) person_iri = rdflib.URIRef(person_uri) if generated_by is None: generated_by = person_iri profile.graph.add(",
"import requests from bs4 import BeautifulSoup from flask import current_app from github import",
"work_form.page_end.data !=None: self.graph.add((work_iri, SCHEMA.pageEnd, rdflib.Literal(work_form.page_end.data))) journal = rdflib.BNode() self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical)) self.graph.add((journal, SCHEMA.name,",
"self.fast_subjects = rdflib.Graph() self.profile = kwargs.get(\"profile\") self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"):",
"message = MIMEMultipart() message[\"From\"] = self.email.get(\"user\") message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in self.recipients])",
"person_iri)) statement = form.get(\"research_stmt\") if len(statement) > 0: profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\")))",
"current_user_email = kwargs.get(\"current_user_email\") config_manager = kwargs.get('config_manager') connection = config_manager.conns revised_by = kwargs.get(\"revised_by\") raw_citation",
"except GithubException: blob = self.scholarship_repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content",
"form.get(\"orcid\") else: person_uri = \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()) person_iri = rdflib.URIRef(person_uri) if generated_by is None:",
"graph_name) message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: self.scholarship_repo.update_file(file_path,",
"and len(work_form.url.data) > 0: self.graph.add((work_iri, SCHEMA.url, rdflib.URIRef(work_form.url.data))) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about,",
"turtle format to the administrators for review before adding to production. Keyword args:",
"= rdflib.URIRef(fast_subject) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) existing_label = profile.fast_subjects.value( subject=iri_subject, predicate=rdflib.RDFS.label) if existing_label",
"email_results[0].get(\"person\").get('value')) temp_work = rdflib.Graph() temp_work.namespace_manager.bind(\"cite\", rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")) for prefix, namespace in git_profile.cc_people.namespaces(): temp_work.namespace_manager.bind(prefix, namespace)",
"= BeautifulSoup(\"\", 'lxml') div = soup.new_tag(\"div\", **{\"class\": \"row\"}) col_1 = soup.new_tag(\"div\", **{\"class\": \"col-1\"})",
"config_manager.nsm.schema sparql = EMAIL_LOOKUP.format( current_user.data.get('mail').lower()) email_results = connection.datastore.query(sparql) if len(email_results) > 0: generated_by",
"=\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") class EmailProfile(object): \"\"\"Simple Email Profile class that creates",
"soup.new_tag(\"span\") span.string = \"p. {}\".format(page_start) col_2.append(span) page_end = citation.get(\"page_end\") if page_end and len(page_end)",
"__save_graph__(self, **kwargs): file_path = kwargs.get(\"file_path\") branch = kwargs.get(\"branch\") graph_name = kwargs.get(\"graph_name\") graph =",
"new_subjects = {} for row in form_subjects: fast_id, fast_label = row.split(\"==\") if fast_id.startswith(\"http\"):",
"= MIMEText(graph_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='profile.ttl') message.attach(attachment) email_server.login( self.email.get(\"user\"), self.email.get(\"password\")) recipients = list(set(self.recipients)) #",
"\"title\" in citation: name = citation.get(\"title\") if \"url\" in citation: work_link = soup.new_tag(\"a\",",
"content.name.startswith(self.current_year_path.split(\"/\")[-1]): self.current_year_git = content self.current_year.parse(data=raw_turtle, format='turtle') if content.name.startswith(\"cc-people\"): self.cc_people_git = content self.cc_people.parse(data=raw_turtle, format='turtle')",
"subjects self.research_statements.add((existing_stmt, SCHEMA.about, subject)) self.fast_subjects.add((subject, rdflib.RDF.type, BF.Topic)) subject_label = self.profile.graph.value(subject=subject, predicate=rdflib.RDFS.label) if subject_label",
"branch=branch) else: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def __update_fast_subjects__(self): existing_subjects, new_subjects = set(), set()",
"citation: em = soup.new_tag(\"em\") em.string = citation.get(\"journal_title\") col_2.append(em) if \"year\" in citation: span",
"delete_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager')",
"MIMEMultipart() message[\"From\"] = self.email.get(\"user\") message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in self.recipients]) message[\"Subject\"] =",
"lang=\"en\"))) form_subjects = form.getlist(\"subjects\") new_subjects = {} for row in form_subjects: fast_id, fast_label",
"connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by =",
"col_1 = soup.new_tag(\"div\", **{\"class\": \"col-1\"}) citation_type = citation.get(\"ENTRYTYPE\") if citation_type.startswith(\"article\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas",
"(statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for",
"on {}, see attached RDF turtle file\".format( citation.citation_type, revised_by, datetime.datetime.utcnow().isoformat()) ) return {\"message\":",
"SCHEMA = rdflib.Namespace(\"http://schema.org/\") class GitProfile(object): def __init__(self, config): self.graph_hashes = {} cc_github =",
"if work_form.editor.data is not None: self.graph.add((work, SCHEMA.editor, rdflib.Literal(work_form.editor.data))) if work_form.provisionActivityStatement.data is not None:",
"os.chdir(directory) result = subprocess.run(['git', 'pull', 'origin', 'master']) click.echo(result.returncode, result.stdout) config_mgr.conns.datastore.mgr.reset() class ProfileUpdateThread(threading.Thread): def",
"iri = kwargs.get(\"iri\") __email_work__( config=config, carbon_copy=[current_user.data.get('mail'),], subject=\"Delete Request\", text=\"Delete citation {} for {}\\nrequested",
"def add(self, work_form, generated_by=None): work_iri = self.__populate_work__(work_form, generated_by) email_body = \"Properties and Values",
"not None: self.graph.add((issue, SCHEMA.partOf, volume)) else: self.graph.add((issue, SCHEMA.partOf, journal)) self.graph.add((work_iri, SCHEMA.partOf, issue)) elif",
"row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) iri_subject = rdflib.URIRef(fast_uri)",
"People\".format(action, person_label)) self.__save_graph__( git_repo=self.tiger_repo, file_path=self.current_year_path, graph_name=\"current_year\", message=\"{} person to Department for school year\".format(action))",
"if given_name is not None: profile.graph.add( (person_iri, SCHEMA.givenName.rdflib, rdflib.Literal(given_name, lang=\"en\"))) family_name = form.get(\"family_name\")",
"in work_form._fields: if row.startswith(\"csrf_token\"): continue field = getattr(work_form, row) email_body += \"\\n{}:\\t{}\".format(row, field.data)",
"profile.new(message) def delete_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user = kwargs.get(\"current_user\") config_manager",
"message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in self.recipients]) message[\"Subject\"] = subject email_server = smtplib.SMTP(",
"citation: span = soup.new_tag(\"span\") span.string = \"({0})\".format(citation.get(\"year\")) col_2.append(span) vol_number = citation.get(\"volume_number\") if vol_number",
"self.__update_research_statements__() self.__save_graph__( file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=self.statement_msg) self.__save_graph__( file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") class",
"if work_form.page_end.data !=None: self.graph.add((work_iri, SCHEMA.pageEnd, rdflib.Literal(work_form.page_end.data))) journal = rdflib.BNode() self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical)) self.graph.add((journal,",
"rdflib.Literal(work_form.notes.data))) def __populate_work__(self, work_form, generated_by=None): \"\"\"Populates graph with new work Args: form(Flask.request.form): Dict",
"= config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema form = kwargs.get('form') current_user =",
"# Start retrieving and parsing latest RDF for creative works, # research statements,",
"self.graph.namespace_manager.bind(\"prov\", PROV) self.email = config.get(\"EMAIL\") self.recipients = config.get(\"ADMINS\") self.person_iri = person_iri def __send_email__(self,",
"row.get(\"name\").startswith(\"datastore\"): for directory_row in row.get(\"data_upload\"): data_upload.append(directory_row[1]) # Pull in the latest changes in",
"message) def update(self, message): \"\"\"Edits existing profile\"\"\" global BACKGROUND_THREAD BACKGROUND_THREAD = ProfileUpdateThread( config=self.config,",
"True} def update_profile(**kwargs): \"\"\"Updates existing triples based on form values\"\"\" config_manager = kwargs.get('config_manager')",
"None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) if citation_type.startswith(\"article\"): self.__add_article__(work_iri, work_form) elif citation_type.startswith(\"book chapter\"): self.graph.add((work_iri, rdflib.RDF.type,",
"fast_label for fast_subject, fast_label in new_subjects.items(): iri_subject = rdflib.URIRef(fast_subject) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject))",
"\"status\": True} def update_profile(**kwargs): \"\"\"Updates existing triples based on form values\"\"\" config_manager =",
"\"col-1\"}) citation_type = citation.get(\"ENTRYTYPE\") if citation_type.startswith(\"article\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-file-alt\"})) elif citation_type.endswith(\"book\"): col_1.append(soup.new_tag(\"i\",",
"None: generated_by = person_iri profile.graph.add( (person_iri, rdflib.RDF.type, BF.Person.rdflib)) given_name = form.get(\"given_name\") if given_name",
"{}\".format(given_name, family_name) profile.graph.add((person_iri, rdflib.RDFS.label, rdflib.Literal(label, lang=\"en\"))) email = form.get(\"email\") profile.graph.add((person_iri, SCHEMA.email.rdflib, rdflib.Literal(email))) add_qualified_generation(profile.graph,",
"import mimetypes import click import rdflib import requests from bs4 import BeautifulSoup from",
"?entity ?label WHERE { ?entity rdf:type schema:Periodical ; schema:name ?label . } \"\"\"):",
"for row in form_subjects: fast_id, fast_label = row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri = fast_id",
"kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') author = kwargs.get(\"author\") connection = config_manager.conns iri = kwargs.get(\"iri\")",
"by {} on {}, see attached RDF turtle file\".format( citation.citation_type, revised_by, datetime.datetime.utcnow().isoformat()) )",
"generated_by = person_iri profile.graph.add( (person_iri, rdflib.RDF.type, BF.Person.rdflib)) given_name = form.get(\"given_name\") if given_name is",
"= hashlib.sha1( self.research_statements.serialize(format='n3')).hexdigest() self.graph_hashes[\"fast_subjects\"] = hashlib.sha1( self.fast_subjects.serialize(format='n3')).hexdigest() def __get_content__(self, repo_name, content): raw_turtle =",
"Work IRI for Article work_form(Flask.request.form): Dict of form values \"\"\" self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.ScholarlyArticle))",
"> 0: span = soup.new_tag(\"span\") span.string = \"v. {}\".format(vol_number) col_2.append(span) issue_number = citation.get(\"issue_number\")",
"statement_iri_results[0].get(\"iri\").get(\"value\")) add_qualified_revision(profile.graph, statement_iri, generated_by) else: statement_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib))",
"revised_by is None and current_user_email: sparql = EMAIL_LOOKUP.format( current_user_email.lower()) email_results = connection.datastore.query(sparql) if",
"__get_content__(self, repo_name, content): raw_turtle = None try: raw_turtle = content.decoded_content except GithubException: repo",
"if citation_type.startswith(\"article\"): self.__add_article__(work_iri, work_form) elif citation_type.startswith(\"book chapter\"): self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter)) book_bnode = rdflib.BNode()",
"= None, None if work_form.volume_number.data != None: volume = rdflib.BNode() self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume))",
"= [] for row in config_mgr.get(\"CONNECTIONS\"): if row.get(\"name\").startswith(\"datastore\"): for directory_row in row.get(\"data_upload\"): data_upload.append(directory_row[1])",
"kwargs.get(\"current_user_email\") config_manager = kwargs.get('config_manager') connection = config_manager.conns revised_by = kwargs.get(\"revised_by\") raw_citation = kwargs.get(\"citation\")",
"iri: edit_click = \"editCitation('{}');\".format(iri) delete_click = \"deleteCitation('{}');\".format(iri) edit_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-warning",
"work_iri = rdflib.URIRef(work_form.iri.data) else: # Mint IRI for new work if \"doi\" in",
"= getattr(self, repo_name) blob = repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) return raw_turtle def __save_graph__(self,",
"{\"message\": \"Changes to work has been submitted for review\", \"status\": True} def update_profile(**kwargs):",
"adding to production. Keyword args: work_graph(rdflib.Graph): RDF Graph of Citation config: Configuration includes",
"CC people now = datetime.datetime.utcnow() if now.month < 7: start_year = now.year -",
"rdflib.Literal(work_form.datePublished.data))) self.graph.add((work_iri, CITE.authorString, rdflib.Literal(work_form.author_string.data))) if generated_by: add_qualified_generation(self.graph, work_iri, generated_by) citation_type = work_form.citation_type.data self.graph.add((work_iri,",
"= name col_2.append(span) if \"journal_title\" in citation: em = soup.new_tag(\"em\") em.string = citation.get(\"journal_title\")",
"None: issue_number = work_graph.value(subject=issue, predicate=SCHEMA.issueNumber) result = connection.datastore.query(\"\"\"SELECT ?issue WHERE {{ ?issue rdf:type",
"kwargs.get(\"config\") cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.tutt_github = cc_github.get_organization(\"Tutt-Library\") self.statement_msg = kwargs.get(\"msg\") self.person_iri =",
"work_iri} def add_profile(**kwargs): \"\"\"Adds a profile stub to scholarship graph\"\"\" config = kwargs.get(\"config\")",
"= content self.cc_people.parse(data=raw_turtle, format='turtle') self.graph_hashes[\"cc_people\"] = hashlib.sha1( self.cc_people.serialize(format='n3')).hexdigest() self.graph_hashes[\"current_year\"] = hashlib.sha1( self.current_year.serialize(format='n3')).hexdigest() #",
"= \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) iri_subject = rdflib.URIRef(fast_uri) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib))",
"bibcat.replace_iri(work_graph, volume, new_volume) if issue is not None: issue_number = work_graph.value(subject=issue, predicate=SCHEMA.issueNumber) result",
"'' person_iri = rdflib.URIRef(form.get(\"iri\")) profile = EmailProfile(config_manager, person_iri) msg = \"\" results =",
"kwargs.get(\"graph_name\") graph = getattr(self, graph_name) message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) git_graph = getattr(self,",
"\"btn btn-warning disabled\", \"onclick\": edit_click, \"type=\": \"input\"}) edit_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-edit\"})) col_3.append(edit_a) delete_a",
"= form.get(\"given_name\") if given_name is not None: profile.graph.add( (person_iri, SCHEMA.givenName.rdflib, rdflib.Literal(given_name, lang=\"en\"))) family_name",
"fast subjects self.research_statements.add((existing_stmt, SCHEMA.about, subject)) self.fast_subjects.add((subject, rdflib.RDF.type, BF.Topic)) subject_label = self.profile.graph.value(subject=subject, predicate=rdflib.RDFS.label) if",
"Start retrieving and parsing latest RDF for creative works, # research statements, and",
"__update_fast_subjects__(self): existing_subjects, new_subjects = set(), set() existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) for row",
"GithubException import utilities from .sparql import EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI from .sparql import add_qualified_generation,",
"= smtplib.SMTP( self.email.get(\"host\"), self.email.get(\"port\")) email_server.ehlo() if self.email.get(\"tls\"): email_server.starttls() body = MIMEText(body, _charset=\"UTF-8\") message.attach(body)",
"uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {}\".format(label), lang=\"en\")))",
"rdflib.URIRef( statement_iri_results[0].get(\"iri\").get(\"value\")) add_qualified_revision(profile.graph, statement_iri, generated_by) else: statement_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type,",
"?label WHERE { ?entity rdf:type schema:Periodical ; schema:name ?label . } \"\"\"): entity,",
"GitProfile(config) current_user_email = kwargs.get(\"current_user_email\") config_manager = kwargs.get('config_manager') connection = config_manager.conns revised_by = kwargs.get(\"revised_by\")",
"= soup.new_tag(\"a\", href=citation.get(\"url\")) work_link.string = name col_2.append(work_link) else: span = soup.new_tag(\"span\") span.string =",
"SCHEMA = config_manager.nsm.schema results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0: generated_by",
"len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = person_iri msg = \"{}",
"subjects self.creative_works = rdflib.Graph() self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\")",
"temp_work, git_profile.cc_people, False) citation.populate() citation.populate_article() citation.add_article() elif work_type.startswith(\"book\"): citation = utilities.Book_Citation(raw_citation, temp_work, git_profile.cc_people,",
"raw_turtle = self.__get_content__(\"tiger_repo\", content) if content.name.startswith(self.current_year_path.split(\"/\")[-1]): self.current_year_git = content self.current_year.parse(data=raw_turtle, format='turtle') if content.name.startswith(\"cc-people\"):",
"__reload_triplestore__(self, config_mgr): data_upload = [] for row in config_mgr.get(\"CONNECTIONS\"): if row.get(\"name\").startswith(\"datastore\"): for directory_row",
"form.get(\"orcid\"): person_uri = form.get(\"orcid\") else: person_uri = \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()) person_iri = rdflib.URIRef(person_uri) if",
"import EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI from .sparql import add_qualified_generation, add_qualified_revision BF = rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\") CITE",
"= form.get(\"family_name\") if family_name is not None: profile.graph.add((person_iri, SCHEMA.familyName.rdflib, rdflib.Literal(family_name, lang=\"en\"))) label =",
"Department for school year\".format(action)) self.__save_graph__( git_repo=self.scholarship_repo, file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=\"{} Research Statement for {}\".format(",
"Pull in the latest changes in each repository for directory in data_upload: os.chdir(directory)",
"0: span = soup.new_tag(\"span\") if \"page_start\" in citation: page_string = \"- {}.\" else:",
"server.close() def generate_citation_html(citation): soup = BeautifulSoup(\"\", 'lxml') div = soup.new_tag(\"div\", **{\"class\": \"row\"}) col_1",
"SCHEMA.partOf, journal)) if work_form.issue_number.data != None: issue = rdflib.BNode() self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue)) self.graph.add((issue,",
"MIMEMultipart from email.mime.text import MIMEText import mimetypes import click import rdflib import requests",
"family_name is not None: profile.graph.add((person_iri, SCHEMA.familyName.rdflib, rdflib.Literal(family_name, lang=\"en\"))) label = \"{} {}\".format(given_name, family_name)",
"= self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): try: raw_turtle = content.decoded_content except GithubException: blob",
". }} OPTIONAL {{ ?issue schema:partOf ?periodical . }} BIND(<{0}> as ?volume) BIND(<{1}>",
"that creates a local RDF graph for new profile or editing profile that",
"edit_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-warning disabled\", \"onclick\": edit_click, \"type=\": \"input\"}) edit_a.append(soup.new_tag(\"i\", **{\"class\":",
"\"deleteCitation('{}');\".format(iri) edit_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-warning disabled\", \"onclick\": edit_click, \"type=\": \"input\"}) edit_a.append(soup.new_tag(\"i\",",
"continue field = getattr(work_form, row) email_body += \"\\n{}:\\t{}\".format(row, field.data) self.__send_email__(\"Added New Work\", email_body)",
"work_graph.query( \"\"\"SELECT ?entity ?label WHERE { ?entity rdf:type schema:Periodical ; schema:name ?label .",
"graph\"\"\" config = kwargs.get(\"config\") current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') profile = EmailProfile(config)",
"not None: self.graph.add((work_iri, SCHEMA.partOf, volume)) else: # Add work_iri to Journal as last",
"(iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) message = \"New {} as",
"base64.b64decode(blob.content) return raw_turtle def __save_graph__(self, **kwargs): git_repo = kwargs.get(\"git_repo\") file_path = kwargs.get(\"file_path\") graph_name",
"subject = kwargs.get('subject') text = kwargs.get('text') carbon_copy = kwargs.get(\"carbon_copy\", []) message = MIMEMultipart()",
"WHERE {{ ?issue rdf:type schema:issueNumber ; schema:issueNumber ?issue_number . OPTIONAL {{ ?issue schema:partOf",
"= EmailProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') connection = config_manager.conns generated_by =",
"\"Updating {}\".format(graph_name)) git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch)",
"{} for {} under review\".format( iri, author), \"status\": True} def edit_creative_work(**kwargs): config =",
"utilities from .sparql import EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI from .sparql import add_qualified_generation, add_qualified_revision BF",
"!= None: self.graph.add((work_iri, CITE.month, rdflib.Literal(work_form.month.data))) def __add_book__(self, work, work_form): self.graph.add((work, rdflib.RDF.type, SCHEMA.Book)) self.graph.add((work,",
"in citation: span = soup.new_tag(\"span\") span.string = \"({0})\".format(citation.get(\"year\")) col_2.append(span) vol_number = citation.get(\"volume_number\") if",
"is SCHEMA.issueNumber: issue = volume_or_issue result = connection.datastore.query(\"\"\"SELECT ?periodical WHERE {{ ?periodical schema:name",
"uuid from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import mimetypes import click",
"lang=\"en\"))) add_qualified_generation( profile.graph, statement_iri, generated_by) citations = form.getlist(\"citations\") for uri in citations: profile.graph.add(",
"sender message[\"Subject\"] = subject message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in recipients]) if len(carbon_copy)",
"\"New work has been submitted for review\", \"status\": True, \"iri\": work_iri} def add_profile(**kwargs):",
"if \"doi\" in work_form and len(work_form.doi.data) > 0: work_iri = rdflib.URIRef(work_form.doi.data) else: work_iri",
"message=\"{} Research Statement for {}\".format( action, person_label)) self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast",
"}} BIND(<{0}> as ?volume) BIND(<{1}> as ?periodical) BIND(\"{2}\" as ?issue_number) }}\"\"\".format(volume, periodical, issue_number)",
"= \"{} {}\".format(given_name, family_name) profile.graph.add((person_iri, rdflib.RDFS.label, rdflib.Literal(label, lang=\"en\"))) email = form.get(\"email\") profile.graph.add((person_iri, SCHEMA.email.rdflib,",
"\"In Review\" col_1.append(under_review) div.append(col_1) col_2 = soup.new_tag(\"div\", **{\"class\": \"col-7\"}) if \"article_title\" in citation:",
"__save_graph__(self, **kwargs): git_repo = kwargs.get(\"git_repo\") file_path = kwargs.get(\"file_path\") graph_name = kwargs.get(\"graph_name\") branch =",
"predicate=SCHEMA.about): new_subjects.add(fast_heading) for subject in list(existing_subjects.difference(new_subjects)): self.research_statements.remove((existing_stmt, SCHEMA.about, subject)) for subject in list(new_subjects.difference(existing_subjects)):",
"= ','.join(carbon_copy) recipients.extend(carbon_copy) body = MIMEText(text, _charset=\"UTF-8\") message.attach(body) if work_graph: work_turtle = io.StringIO(",
"if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"creative-works\"): self.creative_works_git = content",
"# Quick dedup email_server.sendmail(self.email.get(\"user\"), recipients, message.as_string()) email_server.close() def __add_article__(self, work_iri, work_form): \"\"\"Article specific",
"email_results = connection.datastore.query(sparql) if len(email_results) > 0: generated_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) work_iri =",
"graph for new profile or editing profile that is send via email to",
"cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.tutt_github = cc_github.get_organization(\"Tutt-Library\") self.statement_msg = kwargs.get(\"msg\") self.person_iri = kwargs.get(\"person\")",
"in form_subjects: fast_id, fast_label = row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri",
"generated_by) citations = form.getlist(\"citations\") for uri in citations: profile.graph.add( (rdflib.URIRef(uri), SCHEMA.author.rdflib, person_iri)) statement",
"CITE.citationType, rdflib.Literal(citation_type))) if \"author\" in work_form and len(work_form.author.data) > 0: self.person_iri = rdflib.URIRef(work_form.author.data)",
"?issue rdf:type schema:issueNumber ; schema:issueNumber ?issue_number . OPTIONAL {{ ?issue schema:partOf ?volume .",
"graph_name=\"cc_people\", message=\"{} {} to CC People\".format(action, person_label)) self.__save_graph__( git_repo=self.tiger_repo, file_path=self.current_year_path, graph_name=\"current_year\", message=\"{} person",
"self.person_iri = person_iri def __send_email__(self, subject, body): \"\"\"Sends email to administrators with attached",
"col_2.append(span) if \"journal_title\" in citation: em = soup.new_tag(\"em\") em.string = citation.get(\"journal_title\") col_2.append(em) if",
"config_manager.nsm.bf SCHEMA = config_manager.nsm.schema sparql = EMAIL_LOOKUP.format( current_user.data.get('mail').lower()) email_results = connection.datastore.query(sparql) if len(email_results)",
"predicate=rdflib.RDFS.label) if existing_label is None: profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label,",
"self.scholarship_repo.get_dir_contents(\"/data/\"): raw_turtle = self.__get_content__(\"scholarship_repo\", content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle')",
"!= None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) return work_iri def add(self, work_form, generated_by=None): work_iri =",
"cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.triplestore_url = config.get(\"TRIPLESTORE_URL\") self.tutt_github = cc_github.get_organization(\"Tutt-Library\") # Start retrieving",
"works, # research statements, and FAST subjects self.creative_works = rdflib.Graph() self.research_statements = rdflib.Graph()",
"self.graph_hashes[\"current_year\"] = hashlib.sha1( self.current_year.serialize(format='n3')).hexdigest() # Start retrieving and parsing latest RDF for creative",
"0: span = soup.new_tag(\"span\") span.string = \"p. {}\".format(page_start) col_2.append(span) page_end = citation.get(\"page_end\") if",
"branch: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def __update_fast_subjects__(self):",
"elif citation_type.endswith(\"book\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-book\"})) under_review = soup.new_tag(\"em\") under_review.string = \"In Review\"",
"def __reload_triplestore__(self, config_mgr): data_upload = [] for row in config_mgr.get(\"CONNECTIONS\"): if row.get(\"name\").startswith(\"datastore\"): for",
"__init__(self, config, person_iri): self.config = config self.triplestore_url = self.config.get(\"TRIPLESTORE_URL\") self.graph = rdflib.Graph() self.graph.namespace_manager.bind(\"bf\",",
"new_issue) def add_creative_work(**kwargs): \"\"\"Calls utilities to populate and save to datastore\"\"\" config =",
"label, person_iri) profile.new(message) def delete_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user =",
"server.login(sender, config.get(\"EMAIL\")[\"password\"]) recipients = list(set(recipients)) # Quick dedup server.sendmail(sender, recipients, message.as_string()) server.close() def",
"smtplib.SMTP(config.get('EMAIL')['host'], config.get('EMAIL')['port']) server.ehlo() if config.get('EMAIL')['tls']: server.starttls() server.ehlo() server.login(sender, config.get(\"EMAIL\")[\"password\"]) recipients = list(set(recipients)) #",
"kwargs.get(\"git_repo\") file_path = kwargs.get(\"file_path\") graph_name = kwargs.get(\"graph_name\") branch = kwargs.get(\"branch\") message = kwargs.get(\"message\",",
"school year\".format(action)) self.__save_graph__( git_repo=self.scholarship_repo, file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=\"{} Research Statement for {}\".format( action, person_label))",
"= self.email.get(\"user\") message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in self.recipients]) message[\"Subject\"] = subject email_server",
"= row break volume, issue = None, None volume_or_issue = work_graph.value(predicate=SCHEMA.partOf, object=entity) schema_class",
"graph_name=\"research_statements\", message=\"{} Research Statement for {}\".format( action, person_label)) self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\",",
"citation.get(\"volume_number\") if vol_number and len(vol_number) > 0: span = soup.new_tag(\"span\") span.string = \"v.",
"1 end_year = now.year else: start_year = now.year end_year = now.year + 1",
"self.__send_email__(\"Add new profile\", message) def update(self, message): \"\"\"Edits existing profile\"\"\" global BACKGROUND_THREAD BACKGROUND_THREAD",
"rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = None form = kwargs.get(\"form\") if form.get(\"orcid\"): person_uri = form.get(\"orcid\")",
"of {} for {} under review\".format( iri, author), \"status\": True} def edit_creative_work(**kwargs): config",
"name col_2.append(work_link) else: span = soup.new_tag(\"span\") span.string = name col_2.append(span) if \"journal_title\" in",
"rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\") PROV = rdflib.Namespace(\"http://www.w3.org/ns/prov#\") SCHEMA = rdflib.Namespace(\"http://schema.org/\") class GitProfile(object): def __init__(self, config): self.graph_hashes",
"citation_type.endswith(\"book\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-book\"})) under_review = soup.new_tag(\"em\") under_review.string = \"In Review\" col_1.append(under_review)",
"= content.decoded_content except GithubException: blob = self.scholarship_repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git",
"rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) message = \"New {} as {}",
"**{\"class\": \"btn btn-warning disabled\", \"onclick\": edit_click, \"type=\": \"input\"}) edit_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-edit\"})) col_3.append(edit_a)",
"profile = EmailProfile(config) connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema results",
"= self.profile.graph.value(subject=subject, predicate=rdflib.RDFS.label) if subject_label is not None: self.fast_subjects.add((subject, rdflib.RDFS.label, subject_label)) def __update_research_statements__(self):",
"volume_or_issue = work_graph.value(predicate=SCHEMA.partOf, object=entity) schema_class = work_graph.value(subject=volume_or_issue, predicate=rdflib.RDF.type) if schema_class is SCHEMA.volumeNumber: volume",
"new_subjects.add(fast_heading) for subject in list(existing_subjects.difference(new_subjects)): self.research_statements.remove((existing_stmt, SCHEMA.about, subject)) for subject in list(new_subjects.difference(existing_subjects)): #",
"format='turtle') def __save_graph__(self, **kwargs): file_path = kwargs.get(\"file_path\") branch = kwargs.get(\"branch\") graph_name = kwargs.get(\"graph_name\")",
"elif work_type.startswith(\"book\"): citation = utilities.Book_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_book() citation.add_book() if revised_by:",
"results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else:",
"self.creative_works = rdflib.Graph() self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for",
"body): \"\"\"Sends email to administrators with attached profile graph\"\"\" message = MIMEMultipart() message[\"From\"]",
"graph\"\"\" message = MIMEMultipart() message[\"From\"] = self.email.get(\"user\") message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in",
"soup.new_tag(\"span\") if \"page_start\" in citation: page_string = \"- {}.\" else: page_string = \"{}.\"",
"self.creative_works.parse( data=raw_turtle, format='turtle') self.graph_hashes[\"creative_works\"] = hashlib.sha1( self.creative_works.serialize(format='n3')).hexdigest() self.graph_hashes[\"research_statements\"] = hashlib.sha1( self.research_statements.serialize(format='n3')).hexdigest() self.graph_hashes[\"fast_subjects\"] =",
"= config.get(\"EMAIL\") self.recipients = config.get(\"ADMINS\") self.person_iri = person_iri def __send_email__(self, subject, body): \"\"\"Sends",
"following changes to {}'s academic profile:\\n\".format( generated_by, form['label']) statement_iri_results = connection.datastore.query( RESEARCH_STMT_IRI.format( person_iri))",
"to Journal as last resort self.graph.add((work_iri, SCHEMA.partOf, journal)) if work_form.month.data != None: self.graph.add((work_iri,",
"=\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/creative-works.ttl\", graph_name=\"creative_works\", message=\"Creative Works added\")",
"# Mint IRI for new work if \"doi\" in work_form and len(work_form.doi.data) >",
"is not None: profile.graph.add((person_iri, SCHEMA.familyName.rdflib, rdflib.Literal(family_name, lang=\"en\"))) label = \"{} {}\".format(given_name, family_name) profile.graph.add((person_iri,",
"else: # Mint IRI for new work if \"doi\" in work_form and len(work_form.doi.data)",
"fast_id, fast_label = row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:])",
"server.starttls() server.ehlo() server.login(sender, config.get(\"EMAIL\")[\"password\"]) recipients = list(set(recipients)) # Quick dedup server.sendmail(sender, recipients, message.as_string())",
"= list(set(recipients)) # Quick dedup server.sendmail(sender, recipients, message.as_string()) server.close() def generate_citation_html(citation): soup =",
"{}\".format(vol_number) col_2.append(span) issue_number = citation.get(\"issue_number\") if issue_number and len(issue_number ) > 0: span",
"as {} to Colorado College's Scholarship Graph\".format( label, person_iri) profile.new(message) def delete_creative_work(**kwargs): config",
"message[\"From\"] = self.email.get(\"user\") message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in self.recipients]) message[\"Subject\"] = subject",
"= page_string.format(page_end) col_2.append(span) div.append(col_2) col_3 = soup.new_tag(\"div\", **{\"class\": \"col-4\"}) iri = citation.get(\"iri\") if",
"kwargs.get(\"message\", \"Updating {}\".format(graph_name)) graph = getattr(self, graph_name) graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest() if graph_sha1 ==",
"\"year\" in citation: span = soup.new_tag(\"span\") span.string = \"({0})\".format(citation.get(\"year\")) col_2.append(span) vol_number = citation.get(\"volume_number\")",
"span.string = \" no. {}\".format(issue_number) col_2.append(span) page_start = citation.get(\"page_start\") if page_start and len(page_start)",
"{} under review\".format( iri, author), \"status\": True} def edit_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile",
"self.triplestore_url = config.get(\"TRIPLESTORE_URL\") self.tutt_github = cc_github.get_organization(\"Tutt-Library\") # Start retrieving and parsing latest RDF",
"rdflib.RDF.type, SCHEMA.Periodical)) self.graph.add((journal, SCHEMA.name, rdflib.Literal(work_form.journal_title.data))) issue, volume = None, None if work_form.volume_number.data !=",
"file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/creative-works.ttl\", graph_name=\"creative_works\", message=\"Creative Works",
"str(entity): new_work = rdflib.URIRef(periodical) bibcat.replace_iri(work_graph, entity, new_work) entity = new_work if volume is",
"self.graph.add((work_iri, SCHEMA.name, rdflib.Literal(work_form.article_title.data))) if work_form.page_start.data !=None: self.graph.add((work_iri, SCHEMA.pageStart, rdflib.Literal(work_form.page_start.data))) if work_form.page_end.data !=None: self.graph.add((work_iri,",
"disabled\", \"onclick\": edit_click, \"type=\": \"input\"}) edit_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-edit\"})) col_3.append(edit_a) delete_a = soup.new_tag(\"a\",",
"form values \"\"\" self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.ScholarlyArticle)) self.graph.add((work_iri, SCHEMA.name, rdflib.Literal(work_form.article_title.data))) if work_form.page_start.data !=None: self.graph.add((work_iri,",
"\"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) iri_subject = rdflib.URIRef(fast_uri) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add(",
"if work_type.startswith(\"article\"): citation = utilities.Article_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_article() citation.add_article() elif work_type.startswith(\"book\"):",
"self.person_iri = rdflib.URIRef(work_form.author.data) self.graph.add((work_iri, SCHEMA.author, self.person_iri)) elif generated_by: self.person_iri = generated_by self.graph.add((work_iri, SCHEMA.author,",
"in work_graph.query( \"\"\"SELECT ?entity ?label WHERE { ?entity rdf:type schema:Periodical ; schema:name ?label",
"?work) BIND(\"{1}\" as ?volumeNumber) }}\"\"\".format(entity, vol_num)) if result and len(result) > 0: new_volume",
"volume is not None: self.graph.add((issue, SCHEMA.partOf, volume)) else: self.graph.add((issue, SCHEMA.partOf, journal)) self.graph.add((work_iri, SCHEMA.partOf,",
"if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content",
"generated_by) form_subjects = form.getlist(\"subjects\") new_subjects = {} for row in form_subjects: fast_id, fast_label",
"?issue_number . OPTIONAL {{ ?issue schema:partOf ?volume . }} OPTIONAL {{ ?issue schema:partOf",
"SCHEMA.volumeNumber, rdflib.Literal(work_form.volume_number.data))) self.graph.add((volume, SCHEMA.partOf, journal)) if work_form.issue_number.data != None: issue = rdflib.BNode() self.graph.add((issue,",
"message = MIMEMultipart() message[\"From\"] = sender message[\"Subject\"] = subject message[\"To\"] = \",\".join([\"<{0}>\".format(r) for",
"git_profile = GitProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') author = kwargs.get(\"author\") connection",
"IRI for Article work_form(Flask.request.form): Dict of form values \"\"\" self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.ScholarlyArticle)) self.graph.add((work_iri,",
"soup.new_tag(\"span\") span.string = \"v. {}\".format(vol_number) col_2.append(span) issue_number = citation.get(\"issue_number\") if issue_number and len(issue_number",
"__send_email__(self, subject, body): \"\"\"Sends email to administrators with attached profile graph\"\"\" message =",
"work, work_form): self.graph.add((work, rdflib.RDF.type, SCHEMA.Book)) self.graph.add((work, SCHEMA.title, rdflib.Literal(work_form.book_title.data))) if work_form.isbn.data is not None:",
"__author__ = \"<NAME>\" import base64 import bibcat import datetime import hashlib import io",
"= now.year else: start_year = now.year end_year = now.year + 1 self.current_year_path =",
"kwargs.get(\"msg\") self.person_iri = kwargs.get(\"person\") self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.profile = kwargs.get(\"profile\")",
"\"row\"}) col_1 = soup.new_tag(\"div\", **{\"class\": \"col-1\"}) citation_type = citation.get(\"ENTRYTYPE\") if citation_type.startswith(\"article\"): col_1.append(soup.new_tag(\"i\", **{\"class\":",
"= content self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle')",
"person_uri = form.get(\"orcid\") else: person_uri = \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()) person_iri = rdflib.URIRef(person_uri) if generated_by",
"subject=iri_subject, predicate=rdflib.RDFS.label) if existing_label is None: profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label,",
"add(self, work_form, generated_by=None): work_iri = self.__populate_work__(work_form, generated_by) email_body = \"Properties and Values for",
"= kwargs.get(\"config\") git_profile = GitProfile(config) current_user_email = kwargs.get(\"current_user_email\") config_manager = kwargs.get('config_manager') connection =",
"data_upload: os.chdir(directory) result = subprocess.run(['git', 'pull', 'origin', 'master']) click.echo(result.returncode, result.stdout) config_mgr.conns.datastore.mgr.reset() class ProfileUpdateThread(threading.Thread):",
"\"- {}.\" else: page_string = \"{}.\" span.string = page_string.format(page_end) col_2.append(span) div.append(col_2) col_3 =",
"dept_year is not None: dept_year_iri = rdflib.URIRef(dept_year_iri) title = kwargs.get(\"title-iri\") profile.graph.add( (dept_year_iri, rdflib.URIRef(title),",
"form values \"\"\" if len(work_form.iri.data) > 0: work_iri = rdflib.URIRef(work_form.iri.data) else: # Mint",
"def __init__(self, **kwargs): threading.Thread.__init__(self) config = kwargs.get(\"config\") cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.tutt_github =",
"and len(result) > 0: periodical = result[0].get(\"periodical\").get(\"value\") if periodical != str(entity): new_work =",
"import Github, GithubException import utilities from .sparql import EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI from .sparql",
"def update_profile(**kwargs): \"\"\"Updates existing triples based on form values\"\"\" config_manager = kwargs.get('config_manager') connection",
"work_graph.value(subject=volume, predicate=SCHEMA.volumeNumber) result = connection.datastore.query(\"\"\"SELECT ?volume WHERE {{ ?volume schema:partOf ?work ; schema:volumeNumber",
"0: generated_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) work_iri = rdflib.URIRef(profile.add(work_form, generated_by)) #profile.update(\"Added or Updated Creative",
"OPTIONAL {{ ?issue schema:partOf ?volume . }} OPTIONAL {{ ?issue schema:partOf ?periodical .",
"statements, and FAST subjects self.creative_works = rdflib.Graph() self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph()",
"not None: issue_number = work_graph.value(subject=issue, predicate=SCHEMA.issueNumber) result = connection.datastore.query(\"\"\"SELECT ?issue WHERE {{ ?issue",
"col_2.append(em) if \"year\" in citation: span = soup.new_tag(\"span\") span.string = \"({0})\".format(citation.get(\"year\")) col_2.append(span) vol_number",
"len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = None form = kwargs.get(\"form\")",
"label = row break volume, issue = None, None volume_or_issue = work_graph.value(predicate=SCHEMA.partOf, object=entity)",
"Work {}\".format(work_iri) for row in work_form._fields: if row.startswith(\"csrf_token\"): continue field = getattr(work_form, row)",
"result and len(result) > 0: periodical = result[0].get(\"periodical\").get(\"value\") if periodical != str(entity): new_work",
"git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def update_all(self, person_label, action=\"Add\", connection=None): self.__save_graph__( git_repo=self.tiger_repo, file_path=\"/KnowledgeGraph/cc-people.ttl\", graph_name=\"cc_people\",",
"for content in self.scholarship_repo.get_dir_contents(\"/data/\"): try: raw_turtle = content.decoded_content except GithubException: blob = self.scholarship_repo.get_git_blob(content.sha)",
"to the administrators for review before adding to production. Keyword args: work_graph(rdflib.Graph): RDF",
"msg = \"{} made the following changes to {}'s academic profile:\\n\".format( generated_by, form['label'])",
"self.__update_fast_subjects__() self.__update_research_statements__() self.__save_graph__( file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=self.statement_msg) self.__save_graph__( file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\")",
"**{\"class\": \"col-1\"}) citation_type = citation.get(\"ENTRYTYPE\") if citation_type.startswith(\"article\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-file-alt\"})) elif citation_type.endswith(\"book\"):",
"(statement_iri, SCHEMA.about.rdflib, iri_subject)) existing_label = profile.fast_subjects.value( subject=iri_subject, predicate=rdflib.RDFS.label) if existing_label is None: profile.graph.add(",
"= rdflib.Graph() self.tiger_repo = self.tutt_github.get_repo(\"tiger-catalog\") for content in self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"): raw_turtle = self.__get_content__(\"tiger_repo\", content)",
"and len(work_form.doi.data) > 0: work_iri = rdflib.URIRef(work_form.doi.data) else: work_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()))",
"kwargs.get(\"citation\") work_type = kwargs.get(\"work_type\", \"article\") if revised_by is None and current_user_email: sparql =",
"RDF for creative works, # research statements, and FAST subjects self.creative_works = rdflib.Graph()",
"fa-edit\"})) col_3.append(edit_a) delete_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-danger\", \"onclick\": delete_click, \"type=\": \"input\"}) delete_a.append(soup.new_tag(\"i\",",
"is not None: self.graph.add( (work, SCHEMA.description, rdflib.Literal(work_form.notes.data))) def __populate_work__(self, work_form, generated_by=None): \"\"\"Populates graph",
"SCHEMA.issueNumber: issue = volume_or_issue result = connection.datastore.query(\"\"\"SELECT ?periodical WHERE {{ ?periodical schema:name ?name",
"\"{} made the following changes to {}'s academic profile:\\n\".format( generated_by, form['label']) statement_iri_results =",
"import uuid from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import mimetypes import",
"else: person_uri = \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()) person_iri = rdflib.URIRef(person_uri) if generated_by is None: generated_by",
"\"{}_git\".format(graph_name)) if branch: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha)",
"SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) add_qualified_generation(profile.graph, statement_iri, generated_by) form_subjects = form.getlist(\"subjects\")",
"= soup.new_tag(\"em\") under_review.string = \"In Review\" col_1.append(under_review) div.append(col_1) col_2 = soup.new_tag(\"div\", **{\"class\": \"col-7\"})",
"rdflib.URIRef(person_uri) if generated_by is None: generated_by = person_iri profile.graph.add( (person_iri, rdflib.RDF.type, BF.Person.rdflib)) given_name",
"datetime.datetime.utcnow() if now.month < 7: start_year = now.year - 1 end_year = now.year",
"Review\" col_1.append(under_review) div.append(col_1) col_2 = soup.new_tag(\"div\", **{\"class\": \"col-7\"}) if \"article_title\" in citation: name",
"config=config, carbon_copy=[current_user.data.get('mail'),], subject=\"Delete Request\", text=\"Delete citation {} for {}\\nrequested by {} on {}\".format(",
"citation.get(\"title\") if \"url\" in citation: work_link = soup.new_tag(\"a\", href=citation.get(\"url\")) work_link.string = name col_2.append(work_link)",
"in self.recipients]) message[\"Subject\"] = subject email_server = smtplib.SMTP( self.email.get(\"host\"), self.email.get(\"port\")) email_server.ehlo() if self.email.get(\"tls\"):",
"work_type = kwargs.get(\"work_type\", \"article\") if revised_by is None and current_user_email: sparql = EMAIL_LOOKUP.format(",
"and configuration and emails the graph in turtle format to the administrators for",
"= \"editCitation('{}');\".format(iri) delete_click = \"deleteCitation('{}');\".format(iri) edit_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-warning disabled\", \"onclick\":",
"revised by {} on {}, see attached RDF turtle file\".format( citation.citation_type, revised_by, datetime.datetime.utcnow().isoformat())",
"message.attach(body) graph_turtle = io.StringIO( self.graph.serialize(format='turtle').decode()) attachment = MIMEText(graph_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='profile.ttl') message.attach(attachment) email_server.login(",
"self.scholarship_repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle') if",
"for review.\"\"\" def __init__(self, config, person_iri): self.config = config self.triplestore_url = self.config.get(\"TRIPLESTORE_URL\") self.graph",
"a new profile\"\"\" self.__send_email__(\"Add new profile\", message) def update(self, message): \"\"\"Edits existing profile\"\"\"",
"= hashlib.sha1( self.current_year.serialize(format='n3')).hexdigest() # Start retrieving and parsing latest RDF for creative works,",
"self.recipients = config.get(\"ADMINS\") self.person_iri = person_iri def __send_email__(self, subject, body): \"\"\"Sends email to",
"and len(page_end) > 0: span = soup.new_tag(\"span\") if \"page_start\" in citation: page_string =",
"len(carbon_copy) > 0: message[\"Cc\"] = ','.join(carbon_copy) recipients.extend(carbon_copy) body = MIMEText(text, _charset=\"UTF-8\") message.attach(body) if",
"person_iri = rdflib.URIRef(person_uri) if generated_by is None: generated_by = person_iri profile.graph.add( (person_iri, rdflib.RDF.type,",
"not None: vol_num = work_graph.value(subject=volume, predicate=SCHEMA.volumeNumber) result = connection.datastore.query(\"\"\"SELECT ?volume WHERE {{ ?volume",
"else: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def __update_fast_subjects__(self): existing_subjects, new_subjects = set(), set() existing_stmt",
"not None: self.graph.add((work, SCHEMA.editor, rdflib.Literal(work_form.editor.data))) if work_form.provisionActivityStatement.data is not None: self.graph.add( (work, SCHEMA.provisionActivityStatement,",
"git_profile.cc_people, False) citation.populate() citation.populate_article() citation.add_article() elif work_type.startswith(\"book\"): citation = utilities.Book_Citation(raw_citation, temp_work, git_profile.cc_people, False)",
"Administrators for review.\"\"\" def __init__(self, config, person_iri): self.config = config self.triplestore_url = self.config.get(\"TRIPLESTORE_URL\")",
"None: self.fast_subjects.add((subject, rdflib.RDFS.label, subject_label)) def __update_research_statements__(self): existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) current_description =",
"issue, new_issue) def add_creative_work(**kwargs): \"\"\"Calls utilities to populate and save to datastore\"\"\" config",
"profile.graph.add((person_iri, rdflib.RDFS.label, rdflib.Literal(label, lang=\"en\"))) email = form.get(\"email\") profile.graph.add((person_iri, SCHEMA.email.rdflib, rdflib.Literal(email))) add_qualified_generation(profile.graph, person_iri, generated_by)",
"\"editCitation('{}');\".format(iri) delete_click = \"deleteCitation('{}');\".format(iri) edit_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-warning disabled\", \"onclick\": edit_click,",
"server = smtplib.SMTP(config.get('EMAIL')['host'], config.get('EMAIL')['port']) server.ehlo() if config.get('EMAIL')['tls']: server.starttls() server.ehlo() server.login(sender, config.get(\"EMAIL\")[\"password\"]) recipients =",
"import smtplib import subprocess import threading import uuid from email.mime.multipart import MIMEMultipart from",
"repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) return raw_turtle def __save_graph__(self, **kwargs): git_repo = kwargs.get(\"git_repo\") file_path",
"= rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = person_iri msg = \"{} made the following changes",
"last resort self.graph.add((work_iri, SCHEMA.partOf, journal)) if work_form.month.data != None: self.graph.add((work_iri, CITE.month, rdflib.Literal(work_form.month.data))) def",
"form = kwargs.get('form') current_user = kwargs.get(\"current_user\") output = '' person_iri = rdflib.URIRef(form.get(\"iri\")) profile",
"profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {}\".format(label), lang=\"en\"))) profile.graph.add(",
"rdflib.Literal(work_form.issue_number.data))) if volume is not None: self.graph.add((issue, SCHEMA.partOf, volume)) else: self.graph.add((issue, SCHEMA.partOf, journal))",
"SCHEMA.ScholarlyArticle)) self.graph.add((work_iri, SCHEMA.name, rdflib.Literal(work_form.article_title.data))) if work_form.page_start.data !=None: self.graph.add((work_iri, SCHEMA.pageStart, rdflib.Literal(work_form.page_start.data))) if work_form.page_end.data !=None:",
"= kwargs.get(\"config\") cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.tutt_github = cc_github.get_organization(\"Tutt-Library\") self.statement_msg = kwargs.get(\"msg\") self.person_iri",
"= kwargs.get(\"revised_by\") raw_citation = kwargs.get(\"citation\") work_type = kwargs.get(\"work_type\", \"article\") if revised_by is None",
"under_review.string = \"In Review\" col_1.append(under_review) div.append(col_1) col_2 = soup.new_tag(\"div\", **{\"class\": \"col-7\"}) if \"article_title\"",
"SCHEMA.author.rdflib, person_iri)) statement = form.get(\"research_stmt\") if len(statement) > 0: profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement,",
"row.startswith(\"csrf_token\"): continue field = getattr(work_form, row) email_body += \"\\n{}:\\t{}\".format(row, field.data) self.__send_email__(\"Added New Work\",",
"= citation.get(\"page_start\") if page_start and len(page_start) > 0: span = soup.new_tag(\"span\") span.string =",
"in self.profile.graph.objects( subject=existing_stmt, predicate=SCHEMA.about): new_subjects.add(fast_heading) for subject in list(existing_subjects.difference(new_subjects)): self.research_statements.remove((existing_stmt, SCHEMA.about, subject)) for",
"for prefix, namespace in git_profile.cc_people.namespaces(): temp_work.namespace_manager.bind(prefix, namespace) if work_type.startswith(\"article\"): citation = utilities.Article_Citation(raw_citation, temp_work,",
"in list(new_subjects.difference(existing_subjects)): # Add new subject to research statements and fast subjects self.research_statements.add((existing_stmt,",
"len(work_form.doi.data) > 0: work_iri = rdflib.URIRef(work_form.doi.data) else: work_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) self.graph.add((work_iri,",
"\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) self.graph.add((work_iri, SCHEMA.dataPublished, rdflib.Literal(work_form.datePublished.data))) self.graph.add((work_iri, CITE.authorString, rdflib.Literal(work_form.author_string.data))) if generated_by: add_qualified_generation(self.graph, work_iri, generated_by)",
"subject=\"Delete Request\", text=\"Delete citation {} for {}\\nrequested by {} on {}\".format( iri, author,",
"\"url\" in work_form and len(work_form.url.data) > 0: self.graph.add((work_iri, SCHEMA.url, rdflib.URIRef(work_form.url.data))) if work_form.abstract.data !=",
"EmailProfile(object): \"\"\"Simple Email Profile class that creates a local RDF graph for new",
"in self.scholarship_repo.get_dir_contents(\"/data/\"): try: raw_turtle = content.decoded_content except GithubException: blob = self.scholarship_repo.get_git_blob(content.sha) raw_turtle =",
"SCHEMA.editor, rdflib.Literal(work_form.editor.data))) if work_form.provisionActivityStatement.data is not None: self.graph.add( (work, SCHEMA.provisionActivityStatement, rdflib.Literal(work_form.provisionActivityStatement.data))) if work_form.notes.data",
"hashlib.sha1( self.research_statements.serialize(format='n3')).hexdigest() self.graph_hashes[\"fast_subjects\"] = hashlib.sha1( self.fast_subjects.serialize(format='n3')).hexdigest() def __get_content__(self, repo_name, content): raw_turtle = None",
"None volume_or_issue = work_graph.value(predicate=SCHEMA.partOf, object=entity) schema_class = work_graph.value(subject=volume_or_issue, predicate=rdflib.RDF.type) if schema_class is SCHEMA.volumeNumber:",
"kwargs.get(\"config\") git_profile = GitProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') author = kwargs.get(\"author\")",
"self.tutt_github = cc_github.get_organization(\"Tutt-Library\") self.statement_msg = kwargs.get(\"msg\") self.person_iri = kwargs.get(\"person\") self.research_statements = rdflib.Graph() self.fast_subjects",
"= person_iri profile.graph.add( (person_iri, rdflib.RDF.type, BF.Person.rdflib)) given_name = form.get(\"given_name\") if given_name is not",
"citation.get(\"issue_number\") if issue_number and len(issue_number ) > 0: span = soup.new_tag(\"span\") span.string =",
"profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement",
"Creative Work\") return {\"message\": \"New work has been submitted for review\", \"status\": True,",
"person=self.person_iri, profile=self) BACKGROUND_THREAD.start() self.__send_email__(\"Updating Profile\", message) def __email_work__(**kwargs): \"\"\"Function takes a work graph",
"in new_subjects.items(): iri_subject = rdflib.URIRef(fast_subject) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) existing_label = profile.fast_subjects.value( subject=iri_subject,",
"Dict of form values \"\"\" if len(work_form.iri.data) > 0: work_iri = rdflib.URIRef(work_form.iri.data) else:",
"= \"New {} as {} to Colorado College's Scholarship Graph\".format( label, person_iri) profile.new(message)",
"= kwargs.get(\"git_repo\") file_path = kwargs.get(\"file_path\") graph_name = kwargs.get(\"graph_name\") branch = kwargs.get(\"branch\") message =",
"self.graph.add((work_iri, SCHEMA.partOf, volume)) else: # Add work_iri to Journal as last resort self.graph.add((work_iri,",
"Quick dedup email_server.sendmail(self.email.get(\"user\"), recipients, message.as_string()) email_server.close() def __add_article__(self, work_iri, work_form): \"\"\"Article specific data",
"SCHEMA.pageEnd, rdflib.Literal(work_form.page_end.data))) journal = rdflib.BNode() self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical)) self.graph.add((journal, SCHEMA.name, rdflib.Literal(work_form.journal_title.data))) issue, volume",
"rdflib.Literal(work_form.editionStatement.data))) if work_form.editor.data is not None: self.graph.add((work, SCHEMA.editor, rdflib.Literal(work_form.editor.data))) if work_form.provisionActivityStatement.data is not",
"bibcat import datetime import hashlib import io import os import pprint import smtplib",
"= utilities.Article_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_article() citation.add_article() elif work_type.startswith(\"book\"): citation = utilities.Book_Citation(raw_citation,",
"?volume schema:partOf ?work ; schema:volumeNumber ?volumeNumber . BIND(<{0}> as ?work) BIND(\"{1}\" as ?volumeNumber)",
"\"\"\"Calls utilities to populate and save to datastore\"\"\" config = kwargs.get(\"config\") profile =",
"\"{}_git\".format(graph_name)) if branch: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha)",
") return {\"message\": \"Deletion of {} for {} under review\".format( iri, author), \"status\":",
"work_form and len(work_form.doi.data) > 0: work_iri = rdflib.URIRef(work_form.doi.data) else: work_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(",
"if generated_by: add_qualified_generation(self.graph, work_iri, generated_by) citation_type = work_form.citation_type.data self.graph.add((work_iri, CITE.citationType, rdflib.Literal(citation_type))) if \"author\"",
"= soup.new_tag(\"em\") em.string = citation.get(\"journal_title\") col_2.append(em) if \"year\" in citation: span = soup.new_tag(\"span\")",
"is not None: self.graph.add( (work, SCHEMA.provisionActivityStatement, rdflib.Literal(work_form.provisionActivityStatement.data))) if work_form.notes.data is not None: self.graph.add(",
"existing_label is None: profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) message",
"Add new subject to research statements and fast subjects self.research_statements.add((existing_stmt, SCHEMA.about, subject)) self.fast_subjects.add((subject,",
"generated_by) email_body = \"Properties and Values for Creative Work {}\".format(work_iri) for row in",
"sender = config.get('EMAIL')['user'] recipients = config.get(\"ADMINS\") subject = kwargs.get('subject') text = kwargs.get('text') carbon_copy",
"temp_work.namespace_manager.bind(prefix, namespace) if work_type.startswith(\"article\"): citation = utilities.Article_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_article() citation.add_article()",
"config_manager = kwargs.get('config_manager') connection = config_manager.conns revised_by = kwargs.get(\"revised_by\") raw_citation = kwargs.get(\"citation\") work_type",
"\"fas fa-edit\"})) col_3.append(edit_a) delete_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-danger\", \"onclick\": delete_click, \"type=\": \"input\"})",
"BIND(<{0}> as ?volume) BIND(<{1}> as ?periodical) BIND(\"{2}\" as ?issue_number) }}\"\"\".format(volume, periodical, issue_number) )",
"self.__save_graph__( git_repo=self.tiger_repo, file_path=self.current_year_path, graph_name=\"current_year\", message=\"{} person to Department for school year\".format(action)) self.__save_graph__( git_repo=self.scholarship_repo,",
"rdflib.Graph() self.graph.namespace_manager.bind(\"bf\", BF) self.graph.namespace_manager.bind(\"cite\", CITE) self.graph.namespace_manager.bind(\"schema\", SCHEMA) self.graph.namespace_manager.bind(\"prov\", PROV) self.email = config.get(\"EMAIL\") self.recipients",
"return work_iri def add(self, work_form, generated_by=None): work_iri = self.__populate_work__(work_form, generated_by) email_body = \"Properties",
"for Article work_form(Flask.request.form): Dict of form values \"\"\" self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.ScholarlyArticle)) self.graph.add((work_iri, SCHEMA.name,",
"if work_form.volume_number.data != None: volume = rdflib.BNode() self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume)) self.graph.add((volume, SCHEMA.volumeNumber, rdflib.Literal(work_form.volume_number.data)))",
"import rdflib import requests from bs4 import BeautifulSoup from flask import current_app from",
"graph_name=\"research_statements\", message=self.statement_msg) self.__save_graph__( file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") class EmailProfile(object): \"\"\"Simple Email",
"given_name = form.get(\"given_name\") if given_name is not None: profile.graph.add( (person_iri, SCHEMA.givenName.rdflib, rdflib.Literal(given_name, lang=\"en\")))",
"{} on {}, see attached RDF turtle file\".format( citation.citation_type, revised_by, datetime.datetime.utcnow().isoformat()) ) return",
"?volume WHERE {{ ?volume schema:partOf ?work ; schema:volumeNumber ?volumeNumber . BIND(<{0}> as ?work)",
"subject in list(existing_subjects.difference(new_subjects)): self.research_statements.remove((existing_stmt, SCHEMA.about, subject)) for subject in list(new_subjects.difference(existing_subjects)): # Add new",
"elif generated_by: self.person_iri = generated_by self.graph.add((work_iri, SCHEMA.author, generated_by)) if \"url\" in work_form and",
"rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")) for prefix, namespace in git_profile.cc_people.namespaces(): temp_work.namespace_manager.bind(prefix, namespace) if work_type.startswith(\"article\"): citation = utilities.Article_Citation(raw_citation,",
"+= \"\\n{}:\\t{}\".format(row, field.data) self.__send_email__(\"Added New Work\", email_body) return work_iri def new(self, message): \"\"\"Adds",
"Github, GithubException import utilities from .sparql import EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI from .sparql import",
"{} {}\".format( form.get('given_name'), form.get('family_name')), lang=\"en\"))) add_qualified_generation( profile.graph, statement_iri, generated_by) citations = form.getlist(\"citations\") for",
"= kwargs.get(\"branch\") graph_name = kwargs.get(\"graph_name\") graph = getattr(self, graph_name) message = kwargs.get(\"message\", \"Updating",
"= kwargs.get(\"title-iri\") profile.graph.add( (dept_year_iri, rdflib.URIRef(title), person_iri)) statement = kwargs.get(\"statement\", form.get(\"research_stmt\")) if statement is",
"if iri: edit_click = \"editCitation('{}');\".format(iri) delete_click = \"deleteCitation('{}');\".format(iri) edit_a = soup.new_tag(\"a\", **{\"class\": \"btn",
"Email Profile class that creates a local RDF graph for new profile or",
"SCHEMA.about.rdflib, iri_subject)) existing_label = profile.fast_subjects.value( subject=iri_subject, predicate=rdflib.RDFS.label) if existing_label is None: profile.graph.add( (iri_subject,",
"format='turtle') self.graph_hashes[\"cc_people\"] = hashlib.sha1( self.cc_people.serialize(format='n3')).hexdigest() self.graph_hashes[\"current_year\"] = hashlib.sha1( self.current_year.serialize(format='n3')).hexdigest() # Start retrieving and",
"rdflib.Graph() self.fast_subjects = rdflib.Graph() self.profile = kwargs.get(\"profile\") self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in",
"a local RDF graph for new profile or editing profile that is send",
") > 0: span = soup.new_tag(\"span\") span.string = \" no. {}\".format(issue_number) col_2.append(span) page_start",
"0: work_iri = rdflib.URIRef(work_form.iri.data) else: # Mint IRI for new work if \"doi\"",
"attachment.add_header('Content-Disposition', 'attachment', filename='work.ttl') message.attach(attachment) #try: server = smtplib.SMTP(config.get('EMAIL')['host'], config.get('EMAIL')['port']) server.ehlo() if config.get('EMAIL')['tls']: server.starttls()",
"kwargs.get('text') carbon_copy = kwargs.get(\"carbon_copy\", []) message = MIMEMultipart() message[\"From\"] = sender message[\"Subject\"] =",
"config_manager.conns iri = kwargs.get(\"iri\") __email_work__( config=config, carbon_copy=[current_user.data.get('mail'),], subject=\"Delete Request\", text=\"Delete citation {} for",
"content self.fast_subjects.parse( data=raw_turtle, format='turtle') def __save_graph__(self, **kwargs): file_path = kwargs.get(\"file_path\") branch = kwargs.get(\"branch\")",
"subject added\") self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/creative-works.ttl\", graph_name=\"creative_works\", message=\"Creative Works added\") if connection: self.__reload_triplestore__(connection)",
"list(new_subjects.difference(existing_subjects)): # Add new subject to research statements and fast subjects self.research_statements.add((existing_stmt, SCHEMA.about,",
"in list(existing_subjects.difference(new_subjects)): self.research_statements.remove((existing_stmt, SCHEMA.about, subject)) for subject in list(new_subjects.difference(existing_subjects)): # Add new subject",
"\"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) new_subjects[fast_uri] = fast_label for fast_subject, fast_label in new_subjects.items(): iri_subject = rdflib.URIRef(fast_subject) profile.graph.add(",
"\"url\" in citation: work_link = soup.new_tag(\"a\", href=citation.get(\"url\")) work_link.string = name col_2.append(work_link) else: span",
"predicate=SCHEMA.volumeNumber) result = connection.datastore.query(\"\"\"SELECT ?volume WHERE {{ ?volume schema:partOf ?work ; schema:volumeNumber ?volumeNumber",
"message.attach(attachment) #try: server = smtplib.SMTP(config.get('EMAIL')['host'], config.get('EMAIL')['port']) server.ehlo() if config.get('EMAIL')['tls']: server.starttls() server.ehlo() server.login(sender, config.get(\"EMAIL\")[\"password\"])",
"connection = config_manager.conns revised_by = kwargs.get(\"revised_by\") raw_citation = kwargs.get(\"citation\") work_type = kwargs.get(\"work_type\", \"article\")",
"parsing latest RDF for creative works, # research statements, and FAST subjects self.creative_works",
"volume = None, None if work_form.volume_number.data != None: volume = rdflib.BNode() self.graph.add((volume, rdflib.RDF.type,",
"= kwargs.get(\"message\", \"Updating {}\".format(graph_name)) git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'),",
"def update(self, message): \"\"\"Edits existing profile\"\"\" global BACKGROUND_THREAD BACKGROUND_THREAD = ProfileUpdateThread( config=self.config, msg=message,",
"if page_start and len(page_start) > 0: span = soup.new_tag(\"span\") span.string = \"p. {}\".format(page_start)",
"self.cc_people = rdflib.Graph() self.tiger_repo = self.tutt_github.get_repo(\"tiger-catalog\") for content in self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"): raw_turtle = self.__get_content__(\"tiger_repo\",",
"for fast_heading in self.profile.graph.objects( subject=existing_stmt, predicate=SCHEMA.about): new_subjects.add(fast_heading) for subject in list(existing_subjects.difference(new_subjects)): self.research_statements.remove((existing_stmt, SCHEMA.about,",
"self.tutt_github = cc_github.get_organization(\"Tutt-Library\") # Start retrieving and parsing latest RDF for current academic",
"if revised_by: add_qualified_revision(temp_work, rdflib.URIRef(citation.iri), revised_by) email_subject = 'Edited Creative Work {}'.format(citation.iri) __email_work__(graph=temp_work, config=config,",
"self.graph.add((volume, SCHEMA.volumeNumber, rdflib.Literal(work_form.volume_number.data))) self.graph.add((volume, SCHEMA.partOf, journal)) if work_form.issue_number.data != None: issue = rdflib.BNode()",
"= Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.tutt_github = cc_github.get_organization(\"Tutt-Library\") self.statement_msg = kwargs.get(\"msg\") self.person_iri = kwargs.get(\"person\") self.research_statements",
"self.person_iri)) elif generated_by: self.person_iri = generated_by self.graph.add((work_iri, SCHEMA.author, generated_by)) if \"url\" in work_form",
"soup.new_tag(\"div\", **{\"class\": \"row\"}) col_1 = soup.new_tag(\"div\", **{\"class\": \"col-1\"}) citation_type = citation.get(\"ENTRYTYPE\") if citation_type.startswith(\"article\"):",
"len(work_form.url.data) > 0: self.graph.add((work_iri, SCHEMA.url, rdflib.URIRef(work_form.url.data))) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data)))",
"datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Changes to work has been submitted for review\", \"status\":",
"\"Deletion of {} for {} under review\".format( iri, author), \"status\": True} def edit_creative_work(**kwargs):",
"\"New {} as {} to Colorado College's Scholarship Graph\".format( label, person_iri) profile.new(message) def",
"div.append(col_3) return div.prettify() def __reconcile_article__(work_graph, connection): SCHEMA = rdflib.Namespace(\"http://schema.org/\") for row in work_graph.query(",
"= kwargs.get(\"statement\", form.get(\"research_stmt\")) if statement is not None: statement_iri = rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) profile.graph.add(",
"class that creates a local RDF graph for new profile or editing profile",
"if revised_by is None and current_user_email: sparql = EMAIL_LOOKUP.format( current_user_email.lower()) email_results = connection.datastore.query(sparql)",
"prefix, namespace in git_profile.cc_people.namespaces(): temp_work.namespace_manager.bind(prefix, namespace) if work_type.startswith(\"article\"): citation = utilities.Article_Citation(raw_citation, temp_work, git_profile.cc_people,",
"= rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\") PROV = rdflib.Namespace(\"http://www.w3.org/ns/prov#\") SCHEMA = rdflib.Namespace(\"http://schema.org/\") class GitProfile(object): def __init__(self, config):",
"= kwargs.get(\"config\") profile = EmailProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') connection =",
"predicate=SCHEMA.about): existing_subjects.add(row) for fast_heading in self.profile.graph.objects( subject=existing_stmt, predicate=SCHEMA.about): new_subjects.add(fast_heading) for subject in list(existing_subjects.difference(new_subjects)):",
"soup.new_tag(\"a\", **{\"class\": \"btn btn-warning disabled\", \"onclick\": edit_click, \"type=\": \"input\"}) edit_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-edit\"}))",
"result = subprocess.run(['git', 'pull', 'origin', 'master']) click.echo(result.returncode, result.stdout) config_mgr.conns.datastore.mgr.reset() class ProfileUpdateThread(threading.Thread): def __init__(self,",
"data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"creative-works\"): self.creative_works_git",
"message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: self.scholarship_repo.update_file(file_path, message,",
"to administrators with attached profile graph\"\"\" message = MIMEMultipart() message[\"From\"] = self.email.get(\"user\") message[\"To\"]",
"object=entity) schema_class = work_graph.value(subject=volume_or_issue, predicate=rdflib.RDF.type) if schema_class is SCHEMA.volumeNumber: volume = volume_or_issue issue",
"= self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): raw_turtle = self.__get_content__(\"scholarship_repo\", content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git",
"\"fas fa-trash-alt\"})) col_3.append(delete_a) div.append(col_3) return div.prettify() def __reconcile_article__(work_graph, connection): SCHEMA = rdflib.Namespace(\"http://schema.org/\") for",
"= connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by",
"graph_name=\"creative_works\", message=\"Creative Works added\") if connection: self.__reload_triplestore__(connection) def __reload_triplestore__(self, config_mgr): data_upload = []",
"self.email = config.get(\"EMAIL\") self.recipients = config.get(\"ADMINS\") self.person_iri = person_iri def __send_email__(self, subject, body):",
"pprint import smtplib import subprocess import threading import uuid from email.mime.multipart import MIMEMultipart",
"{}\".format( form.get('given_name'), form.get('family_name')), lang=\"en\"))) add_qualified_generation( profile.graph, statement_iri, generated_by) citations = form.getlist(\"citations\") for uri",
"graph.serialize(format='turtle'), git_graph.sha) def update_all(self, person_label, action=\"Add\", connection=None): self.__save_graph__( git_repo=self.tiger_repo, file_path=\"/KnowledgeGraph/cc-people.ttl\", graph_name=\"cc_people\", message=\"{} {}",
"SCHEMA.editionStatement, rdflib.Literal(work_form.editionStatement.data))) if work_form.editor.data is not None: self.graph.add((work, SCHEMA.editor, rdflib.Literal(work_form.editor.data))) if work_form.provisionActivityStatement.data is",
"= work_form.citation_type.data self.graph.add((work_iri, CITE.citationType, rdflib.Literal(citation_type))) if \"author\" in work_form and len(work_form.author.data) > 0:",
"fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) new_subjects[fast_uri] = fast_label for fast_subject, fast_label",
"generated_by) citation_type = work_form.citation_type.data self.graph.add((work_iri, CITE.citationType, rdflib.Literal(citation_type))) if \"author\" in work_form and len(work_form.author.data)",
"citation: work_link = soup.new_tag(\"a\", href=citation.get(\"url\")) work_link.string = name col_2.append(work_link) else: span = soup.new_tag(\"span\")",
"profile.fast_subjects.value( subject=iri_subject, predicate=rdflib.RDFS.label) if existing_label is None: profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject,",
"person_iri def __send_email__(self, subject, body): \"\"\"Sends email to administrators with attached profile graph\"\"\"",
"config_manager.nsm.bf SCHEMA = config_manager.nsm.schema results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0:",
"subject=existing_stmt, predicate=SCHEMA.about): existing_subjects.add(row) for fast_heading in self.profile.graph.objects( subject=existing_stmt, predicate=SCHEMA.about): new_subjects.add(fast_heading) for subject in",
".sparql import EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI from .sparql import add_qualified_generation, add_qualified_revision BF = rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\")",
"config = kwargs.get(\"config\") current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') profile = EmailProfile(config) connection",
"!= str(entity): new_work = rdflib.URIRef(periodical) bibcat.replace_iri(work_graph, entity, new_work) entity = new_work if volume",
"rdflib.Graph() temp_work.namespace_manager.bind(\"cite\", rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")) for prefix, namespace in git_profile.cc_people.namespaces(): temp_work.namespace_manager.bind(prefix, namespace) if work_type.startswith(\"article\"): citation",
"under review\".format( iri, author), \"status\": True} def edit_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile =",
"filename='work.ttl') message.attach(attachment) #try: server = smtplib.SMTP(config.get('EMAIL')['host'], config.get('EMAIL')['port']) server.ehlo() if config.get('EMAIL')['tls']: server.starttls() server.ehlo() server.login(sender,",
"OPTIONAL {{ ?issue schema:partOf ?periodical . }} BIND(<{0}> as ?volume) BIND(<{1}> as ?periodical)",
"soup.new_tag(\"span\") span.string = \" no. {}\".format(issue_number) col_2.append(span) page_start = citation.get(\"page_start\") if page_start and",
"\"\"\"): entity, label = row break volume, issue = None, None volume_or_issue =",
"revised_by = kwargs.get(\"revised_by\") raw_citation = kwargs.get(\"citation\") work_type = kwargs.get(\"work_type\", \"article\") if revised_by is",
"= kwargs.get(\"graph_name\") branch = kwargs.get(\"branch\") message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) graph = getattr(self,",
"def __populate_work__(self, work_form, generated_by=None): \"\"\"Populates graph with new work Args: form(Flask.request.form): Dict of",
"work_iri to Journal as last resort self.graph.add((work_iri, SCHEMA.partOf, journal)) if work_form.month.data != None:",
"= soup.new_tag(\"span\") span.string = \"({0})\".format(citation.get(\"year\")) col_2.append(span) vol_number = citation.get(\"volume_number\") if vol_number and len(vol_number)",
"self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) return work_iri def add(self, work_form, generated_by=None): work_iri = self.__populate_work__(work_form, generated_by)",
"= soup.new_tag(\"span\") span.string = \"v. {}\".format(vol_number) col_2.append(span) issue_number = citation.get(\"issue_number\") if issue_number and",
"not None: self.graph.add((work, SCHEMA.isbn, rdflib.Literal(work_form.isbn.data))) if work_form.editionStatement.data is not None: self.graph.add( (work, SCHEMA.editionStatement,",
"(person_iri, SCHEMA.givenName.rdflib, rdflib.Literal(given_name, lang=\"en\"))) family_name = form.get(\"family_name\") if family_name is not None: profile.graph.add((person_iri,",
"len(work_form.author.data) > 0: self.person_iri = rdflib.URIRef(work_form.author.data) self.graph.add((work_iri, SCHEMA.author, self.person_iri)) elif generated_by: self.person_iri =",
"WHERE {{ ?periodical schema:name ?name . FILTER(CONTAINS(?name, \"{0}\")) }}\"\"\".format(label)) if result and len(result)",
"config.get(\"GITHUB_PWD\")) self.triplestore_url = config.get(\"TRIPLESTORE_URL\") self.tutt_github = cc_github.get_organization(\"Tutt-Library\") # Start retrieving and parsing latest",
"= rdflib.Graph() self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content",
"def __reconcile_article__(work_graph, connection): SCHEMA = rdflib.Namespace(\"http://schema.org/\") for row in work_graph.query( \"\"\"SELECT ?entity ?label",
"end_year = now.year + 1 self.current_year_path = \"/KnowledgeGraph/cc-{0}-{1}.ttl\".format( start_year, end_year) self.current_year = rdflib.Graph()",
"= None, None volume_or_issue = work_graph.value(predicate=SCHEMA.partOf, object=entity) schema_class = work_graph.value(subject=volume_or_issue, predicate=rdflib.RDF.type) if schema_class",
"__add_book__(self, work, work_form): self.graph.add((work, rdflib.RDF.type, SCHEMA.Book)) self.graph.add((work, SCHEMA.title, rdflib.Literal(work_form.book_title.data))) if work_form.isbn.data is not",
"data=raw_turtle, format='turtle') def __save_graph__(self, **kwargs): file_path = kwargs.get(\"file_path\") branch = kwargs.get(\"branch\") graph_name =",
"if statement is not None: statement_iri = rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib))",
"citation.get(\"journal_title\") col_2.append(em) if \"year\" in citation: span = soup.new_tag(\"span\") span.string = \"({0})\".format(citation.get(\"year\")) col_2.append(span)",
"rdflib.BNode() self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical)) self.graph.add((journal, SCHEMA.name, rdflib.Literal(work_form.journal_title.data))) issue, volume = None, None if",
"SCHEMA.partOf, book_bnode)) self.__add_book__(book_bnode, work_form) elif citation_type.startswith(\"book\"): self.__add_book__(work_iri, work_form) else: abort(500) if work_form.abstract.data !=",
"subject added\") class EmailProfile(object): \"\"\"Simple Email Profile class that creates a local RDF",
"None: issue = rdflib.BNode() self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue)) self.graph.add((issue, SCHEMA.issueNumber, rdflib.Literal(work_form.issue_number.data))) if volume is",
"raw_turtle = base64.b64decode(blob.content) return raw_turtle def __save_graph__(self, **kwargs): git_repo = kwargs.get(\"git_repo\") file_path =",
"self.graph.add((work, SCHEMA.editor, rdflib.Literal(work_form.editor.data))) if work_form.provisionActivityStatement.data is not None: self.graph.add( (work, SCHEMA.provisionActivityStatement, rdflib.Literal(work_form.provisionActivityStatement.data))) if",
"> 0: periodical = result[0].get(\"periodical\").get(\"value\") if periodical != str(entity): new_work = rdflib.URIRef(periodical) bibcat.replace_iri(work_graph,",
"text=\"Delete citation {} for {}\\nrequested by {} on {}\".format( iri, author, current_user.data.get('mail'), datetime.datetime.utcnow().isoformat())",
"import threading import uuid from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import",
"self.tiger_repo = self.tutt_github.get_repo(\"tiger-catalog\") for content in self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"): raw_turtle = self.__get_content__(\"tiger_repo\", content) if content.name.startswith(self.current_year_path.split(\"/\")[-1]):",
"!=None: self.graph.add((work_iri, SCHEMA.pageEnd, rdflib.Literal(work_form.page_end.data))) journal = rdflib.BNode() self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical)) self.graph.add((journal, SCHEMA.name, rdflib.Literal(work_form.journal_title.data)))",
"BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results)",
"row break volume, issue = None, None volume_or_issue = work_graph.value(predicate=SCHEMA.partOf, object=entity) schema_class =",
"0: span = soup.new_tag(\"span\") span.string = \" no. {}\".format(issue_number) col_2.append(span) page_start = citation.get(\"page_start\")",
"academic profile:\\n\".format( generated_by, form['label']) statement_iri_results = connection.datastore.query( RESEARCH_STMT_IRI.format( person_iri)) if len(statement_iri_results) > 0:",
"graph_name=\"current_year\", message=\"{} person to Department for school year\".format(action)) self.__save_graph__( git_repo=self.scholarship_repo, file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=\"{}",
"threading.Thread.__init__(self) config = kwargs.get(\"config\") cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.tutt_github = cc_github.get_organization(\"Tutt-Library\") self.statement_msg =",
"is not None: self.graph.add((work_iri, SCHEMA.partOf, volume)) else: # Add work_iri to Journal as",
"self.graph.add((work, SCHEMA.isbn, rdflib.Literal(work_form.isbn.data))) if work_form.editionStatement.data is not None: self.graph.add( (work, SCHEMA.editionStatement, rdflib.Literal(work_form.editionStatement.data))) if",
"emails the graph in turtle format to the administrators for review before adding",
"graph_name = kwargs.get(\"graph_name\") graph = getattr(self, graph_name) message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) git_graph",
"kwargs.get('subject') text = kwargs.get('text') carbon_copy = kwargs.get(\"carbon_copy\", []) message = MIMEMultipart() message[\"From\"] =",
"MIMEMultipart() message[\"From\"] = sender message[\"Subject\"] = subject message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in",
"rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {} {}\".format( form.get('given_name'), form.get('family_name')), lang=\"en\"))) add_qualified_generation( profile.graph, statement_iri, generated_by)",
"vol_num = work_graph.value(subject=volume, predicate=SCHEMA.volumeNumber) result = connection.datastore.query(\"\"\"SELECT ?volume WHERE {{ ?volume schema:partOf ?work",
"review.\"\"\" def __init__(self, config, person_iri): self.config = config self.triplestore_url = self.config.get(\"TRIPLESTORE_URL\") self.graph =",
"rdflib.Literal(work_form.volume_number.data))) self.graph.add((volume, SCHEMA.partOf, journal)) if work_form.issue_number.data != None: issue = rdflib.BNode() self.graph.add((issue, rdflib.RDF.type,",
"statement_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add(",
"None: profile.graph.add( (person_iri, SCHEMA.givenName.rdflib, rdflib.Literal(given_name, lang=\"en\"))) family_name = form.get(\"family_name\") if family_name is not",
"person to Department for school year\".format(action)) self.__save_graph__( git_repo=self.scholarship_repo, file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=\"{} Research Statement",
"to populate and save to datastore\"\"\" config = kwargs.get(\"config\") profile = EmailProfile(config) current_user",
"form.getlist(\"subjects\") new_subjects = {} for row in form_subjects: fast_id, fast_label = row.split(\"==\") if",
"and emails the graph in turtle format to the administrators for review before",
"kwargs.get('config_manager') connection = config_manager.conns generated_by = kwargs.get(\"generated_by\") work_form = kwargs.get(\"work_form\") BF = config_manager.nsm.bf",
"Mint IRI for new work if \"doi\" in work_form and len(work_form.doi.data) > 0:",
"connection.datastore.query(\"\"\"SELECT ?issue WHERE {{ ?issue rdf:type schema:issueNumber ; schema:issueNumber ?issue_number . OPTIONAL {{",
"\"v. {}\".format(vol_number) col_2.append(span) issue_number = citation.get(\"issue_number\") if issue_number and len(issue_number ) > 0:",
"} \"\"\"): entity, label = row break volume, issue = None, None volume_or_issue",
"iri_subject = rdflib.URIRef(fast_uri) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject,",
"citation.get(\"page_end\") if page_end and len(page_end) > 0: span = soup.new_tag(\"span\") if \"page_start\" in",
"rdflib.Graph() self.profile = kwargs.get(\"profile\") self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): try: raw_turtle",
"citation.citation_type, revised_by, datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Changes to work has been submitted for",
"subject_label = self.profile.graph.value(subject=subject, predicate=rdflib.RDFS.label) if subject_label is not None: self.fast_subjects.add((subject, rdflib.RDFS.label, subject_label)) def",
"0: profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) form_subjects = form.getlist(\"subjects\") new_subjects = {} for",
"= now.year - 1 end_year = now.year else: start_year = now.year end_year =",
"generated_by is None: generated_by = person_iri profile.graph.add( (person_iri, rdflib.RDF.type, BF.Person.rdflib)) given_name = form.get(\"given_name\")",
"existing_subjects, new_subjects = set(), set() existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) for row in",
"object=self.person_iri) current_description = self.research_statements.value( subject=existing_stmt, predicate=SCHEMA.description) new_description = self.profile.graph.value( subject=existing_stmt, predicate=SCHEMA.description) if new_description",
"?volume . }} OPTIONAL {{ ?issue schema:partOf ?periodical . }} BIND(<{0}> as ?volume)",
"for subject in list(new_subjects.difference(existing_subjects)): # Add new subject to research statements and fast",
"- 1 end_year = now.year else: start_year = now.year end_year = now.year +",
"rdflib.URIRef(periodical) bibcat.replace_iri(work_graph, entity, new_work) entity = new_work if volume is not None: vol_num",
"# Add new subject to research statements and fast subjects self.research_statements.add((existing_stmt, SCHEMA.about, subject))",
"content) if content.name.startswith(self.current_year_path.split(\"/\")[-1]): self.current_year_git = content self.current_year.parse(data=raw_turtle, format='turtle') if content.name.startswith(\"cc-people\"): self.cc_people_git = content",
"row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) new_subjects[fast_uri] = fast_label",
"graph and configuration and emails the graph in turtle format to the administrators",
"= profile.fast_subjects.value( subject=iri_subject, predicate=rdflib.RDFS.label) if existing_label is None: profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add(",
"ProfileUpdateThread( config=self.config, msg=message, person=self.person_iri, profile=self) BACKGROUND_THREAD.start() self.__send_email__(\"Updating Profile\", message) def __email_work__(**kwargs): \"\"\"Function takes",
"profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) add_qualified_generation(profile.graph, statement_iri, generated_by) form_subjects",
"rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {}",
"if \"year\" in citation: span = soup.new_tag(\"span\") span.string = \"({0})\".format(citation.get(\"year\")) col_2.append(span) vol_number =",
"col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-file-alt\"})) elif citation_type.endswith(\"book\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-book\"})) under_review = soup.new_tag(\"em\")",
"config_manager.conns generated_by = kwargs.get(\"generated_by\") work_form = kwargs.get(\"work_form\") BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema",
"work_link = soup.new_tag(\"a\", href=citation.get(\"url\")) work_link.string = name col_2.append(work_link) else: span = soup.new_tag(\"span\") span.string",
"is not None: self.graph.add((issue, SCHEMA.partOf, volume)) else: self.graph.add((issue, SCHEMA.partOf, journal)) self.graph.add((work_iri, SCHEMA.partOf, issue))",
"if result and len(result) > 0: new_issue = rdflib.URIRef(result[0].get(\"issue\").get(\"value\")) bibcat.replace_iri(work_graph, issue, new_issue) def",
"Statement for {} {}\".format( form.get('given_name'), form.get('family_name')), lang=\"en\"))) add_qualified_generation( profile.graph, statement_iri, generated_by) citations =",
"else: generated_by = person_iri msg = \"{} made the following changes to {}'s",
"{} as {} to Colorado College's Scholarship Graph\".format( label, person_iri) profile.new(message) def delete_creative_work(**kwargs):",
"each repository for directory in data_upload: os.chdir(directory) result = subprocess.run(['git', 'pull', 'origin', 'master'])",
"work_form.citation_type.data self.graph.add((work_iri, CITE.citationType, rdflib.Literal(citation_type))) if \"author\" in work_form and len(work_form.author.data) > 0: self.person_iri",
"work_form.editor.data is not None: self.graph.add((work, SCHEMA.editor, rdflib.Literal(work_form.editor.data))) if work_form.provisionActivityStatement.data is not None: self.graph.add(",
"= fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) iri_subject = rdflib.URIRef(fast_uri) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject))",
"\" no. {}\".format(issue_number) col_2.append(span) page_start = citation.get(\"page_start\") if page_start and len(page_start) > 0:",
"fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) new_subjects[fast_uri] = fast_label for fast_subject, fast_label in new_subjects.items(): iri_subject =",
"len(result) > 0: new_issue = rdflib.URIRef(result[0].get(\"issue\").get(\"value\")) bibcat.replace_iri(work_graph, issue, new_issue) def add_creative_work(**kwargs): \"\"\"Calls utilities",
"for {}\".format(label), lang=\"en\"))) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) add_qualified_generation(profile.graph,",
"elif schema_class is SCHEMA.issueNumber: issue = volume_or_issue result = connection.datastore.query(\"\"\"SELECT ?periodical WHERE {{",
"git_graph.sha, branch=branch) else: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def update_all(self, person_label, action=\"Add\", connection=None): self.__save_graph__(",
"= rdflib.Namespace(\"http://schema.org/\") class GitProfile(object): def __init__(self, config): self.graph_hashes = {} cc_github = Github(config.get(\"GITHUB_USER\"),",
"connection): SCHEMA = rdflib.Namespace(\"http://schema.org/\") for row in work_graph.query( \"\"\"SELECT ?entity ?label WHERE {",
"= MIMEText(work_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='work.ttl') message.attach(attachment) #try: server = smtplib.SMTP(config.get('EMAIL')['host'], config.get('EMAIL')['port']) server.ehlo() if",
"= \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) new_subjects[fast_uri] = fast_label for fast_subject, fast_label in new_subjects.items(): iri_subject = rdflib.URIRef(fast_subject)",
"0: self.graph.add((work_iri, SCHEMA.url, rdflib.URIRef(work_form.url.data))) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) if citation_type.startswith(\"article\"):",
"generated_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) work_iri = rdflib.URIRef(profile.add(work_form, generated_by)) #profile.update(\"Added or Updated Creative Work\")",
"email to the Administrators for review.\"\"\" def __init__(self, config, person_iri): self.config = config",
"set(), set() existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) for row in self.research_statements.objects( subject=existing_stmt, predicate=SCHEMA.about):",
"MIMEText(body, _charset=\"UTF-8\") message.attach(body) graph_turtle = io.StringIO( self.graph.serialize(format='turtle').decode()) attachment = MIMEText(graph_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='profile.ttl')",
"= rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for",
"RDF turtle file\".format( citation.citation_type, revised_by, datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Changes to work has",
"= rdflib.URIRef(work_form.iri.data) else: # Mint IRI for new work if \"doi\" in work_form",
"predicate=SCHEMA.accountablePerson, object=self.person_iri) for row in self.research_statements.objects( subject=existing_stmt, predicate=SCHEMA.about): existing_subjects.add(row) for fast_heading in self.profile.graph.objects(",
"else: # Add work_iri to Journal as last resort self.graph.add((work_iri, SCHEMA.partOf, journal)) if",
"< 7: start_year = now.year - 1 end_year = now.year else: start_year =",
"and fast subjects self.research_statements.add((existing_stmt, SCHEMA.about, subject)) self.fast_subjects.add((subject, rdflib.RDF.type, BF.Topic)) subject_label = self.profile.graph.value(subject=subject, predicate=rdflib.RDFS.label)",
"issue_number) ) if result and len(result) > 0: new_issue = rdflib.URIRef(result[0].get(\"issue\").get(\"value\")) bibcat.replace_iri(work_graph, issue,",
"changes to self.__update_fast_subjects__() self.__update_research_statements__() self.__save_graph__( file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=self.statement_msg) self.__save_graph__( file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast",
"on {}\".format( iri, author, current_user.data.get('mail'), datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Deletion of {} for",
"= config_manager.nsm.schema form = kwargs.get('form') current_user = kwargs.get(\"current_user\") output = '' person_iri =",
"= kwargs.get(\"work_form\") BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema sparql = EMAIL_LOOKUP.format( current_user.data.get('mail').lower()) email_results",
"{} for {}\\nrequested by {} on {}\".format( iri, author, current_user.data.get('mail'), datetime.datetime.utcnow().isoformat()) ) return",
"generated_by)) if \"url\" in work_form and len(work_form.url.data) > 0: self.graph.add((work_iri, SCHEMA.url, rdflib.URIRef(work_form.url.data))) if",
"0: message[\"Cc\"] = ','.join(carbon_copy) recipients.extend(carbon_copy) body = MIMEText(text, _charset=\"UTF-8\") message.attach(body) if work_graph: work_turtle",
"kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') connection = config_manager.conns generated_by = kwargs.get(\"generated_by\") work_form = kwargs.get(\"work_form\")",
"data=raw_turtle, format='turtle') if content.name.startswith(\"creative-works\"): self.creative_works_git = content self.creative_works.parse( data=raw_turtle, format='turtle') self.graph_hashes[\"creative_works\"] = hashlib.sha1(",
"not None: dept_year_iri = rdflib.URIRef(dept_year_iri) title = kwargs.get(\"title-iri\") profile.graph.add( (dept_year_iri, rdflib.URIRef(title), person_iri)) statement",
"= rdflib.URIRef(dept_year_iri) title = kwargs.get(\"title-iri\") profile.graph.add( (dept_year_iri, rdflib.URIRef(title), person_iri)) statement = kwargs.get(\"statement\", form.get(\"research_stmt\"))",
"work_iri = rdflib.URIRef(work_form.doi.data) else: work_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) self.graph.add((work_iri, SCHEMA.dataPublished, rdflib.Literal(work_form.datePublished.data))) self.graph.add((work_iri,",
"statement_iri, generated_by) else: statement_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri,",
"kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') profile = EmailProfile(config) connection = config_manager.conns BF = config_manager.nsm.bf",
"> 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = person_iri msg = \"{} made",
"page_string.format(page_end) col_2.append(span) div.append(col_2) col_3 = soup.new_tag(\"div\", **{\"class\": \"col-4\"}) iri = citation.get(\"iri\") if iri:",
"utilities to populate and save to datastore\"\"\" config = kwargs.get(\"config\") profile = EmailProfile(config)",
"fast_label = row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) iri_subject",
"config self.triplestore_url = self.config.get(\"TRIPLESTORE_URL\") self.graph = rdflib.Graph() self.graph.namespace_manager.bind(\"bf\", BF) self.graph.namespace_manager.bind(\"cite\", CITE) self.graph.namespace_manager.bind(\"schema\", SCHEMA)",
"recipients = list(set(recipients)) # Quick dedup server.sendmail(sender, recipients, message.as_string()) server.close() def generate_citation_html(citation): soup",
"revised_by) email_subject = 'Edited Creative Work {}'.format(citation.iri) __email_work__(graph=temp_work, config=config, carbon_copy=[current_user_email,], subject=email_subject, text=\"Edited {}",
"= rdflib.BNode() self.graph.add((work_iri, SCHEMA.partOf, book_bnode)) self.__add_book__(book_bnode, work_form) elif citation_type.startswith(\"book\"): self.__add_book__(work_iri, work_form) else: abort(500)",
"subprocess import threading import uuid from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText",
"work_graph.value(subject=issue, predicate=SCHEMA.issueNumber) result = connection.datastore.query(\"\"\"SELECT ?issue WHERE {{ ?issue rdf:type schema:issueNumber ; schema:issueNumber",
"for row in config_mgr.get(\"CONNECTIONS\"): if row.get(\"name\").startswith(\"datastore\"): for directory_row in row.get(\"data_upload\"): data_upload.append(directory_row[1]) # Pull",
"self.__get_content__(\"scholarship_repo\", content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git",
"hashlib.sha1( self.creative_works.serialize(format='n3')).hexdigest() self.graph_hashes[\"research_statements\"] = hashlib.sha1( self.research_statements.serialize(format='n3')).hexdigest() self.graph_hashes[\"fast_subjects\"] = hashlib.sha1( self.fast_subjects.serialize(format='n3')).hexdigest() def __get_content__(self, repo_name,",
"def run(self): # Function iterates and commits any changes to self.__update_fast_subjects__() self.__update_research_statements__() self.__save_graph__(",
"(person_iri, rdflib.RDF.type, BF.Person.rdflib)) given_name = form.get(\"given_name\") if given_name is not None: profile.graph.add( (person_iri,",
"run(self): # Function iterates and commits any changes to self.__update_fast_subjects__() self.__update_research_statements__() self.__save_graph__( file_path=\"/data/cc-research-statements.ttl\",",
"logins and administor \"\"\" work_graph = kwargs.get(\"graph\") config = kwargs.get(\"config\") sender = config.get('EMAIL')['user']",
"result and len(result) > 0: new_issue = rdflib.URIRef(result[0].get(\"issue\").get(\"value\")) bibcat.replace_iri(work_graph, issue, new_issue) def add_creative_work(**kwargs):",
"= kwargs.get('form') current_user = kwargs.get(\"current_user\") output = '' person_iri = rdflib.URIRef(form.get(\"iri\")) profile =",
"work_graph(rdflib.Graph): RDF Graph of Citation config: Configuration includes logins and administor \"\"\" work_graph",
"Statement for {}\".format( action, person_label)) self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\")",
"import add_qualified_generation, add_qualified_revision BF = rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\") CITE = rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\") PROV = rdflib.Namespace(\"http://www.w3.org/ns/prov#\") SCHEMA",
"self.current_year_git = content self.current_year.parse(data=raw_turtle, format='turtle') if content.name.startswith(\"cc-people\"): self.cc_people_git = content self.cc_people.parse(data=raw_turtle, format='turtle') self.graph_hashes[\"cc_people\"]",
"from bs4 import BeautifulSoup from flask import current_app from github import Github, GithubException",
"volume, issue = None, None volume_or_issue = work_graph.value(predicate=SCHEMA.partOf, object=entity) schema_class = work_graph.value(subject=volume_or_issue, predicate=rdflib.RDF.type)",
"self.cc_people.parse(data=raw_turtle, format='turtle') self.graph_hashes[\"cc_people\"] = hashlib.sha1( self.cc_people.serialize(format='n3')).hexdigest() self.graph_hashes[\"current_year\"] = hashlib.sha1( self.current_year.serialize(format='n3')).hexdigest() # Start retrieving",
"= kwargs.get('text') carbon_copy = kwargs.get(\"carbon_copy\", []) message = MIMEMultipart() message[\"From\"] = sender message[\"Subject\"]",
"__add_article__(self, work_iri, work_form): \"\"\"Article specific data added to creative work Args: work_iri(rdflib.URIRef): Creative",
"message.as_string()) server.close() def generate_citation_html(citation): soup = BeautifulSoup(\"\", 'lxml') div = soup.new_tag(\"div\", **{\"class\": \"row\"})",
"profile.graph.add((person_iri, SCHEMA.familyName.rdflib, rdflib.Literal(family_name, lang=\"en\"))) label = \"{} {}\".format(given_name, family_name) profile.graph.add((person_iri, rdflib.RDFS.label, rdflib.Literal(label, lang=\"en\")))",
"predicate=SCHEMA.description) if new_description is not None \\ and str(current_description) != str(new_description): self.research_statements.remove((existing_stmt, SCHEMA.description,",
"\"{0}\")) }}\"\"\".format(label)) if result and len(result) > 0: periodical = result[0].get(\"periodical\").get(\"value\") if periodical",
"as ?periodical) BIND(\"{2}\" as ?issue_number) }}\"\"\".format(volume, periodical, issue_number) ) if result and len(result)",
"email_server.starttls() body = MIMEText(body, _charset=\"UTF-8\") message.attach(body) graph_turtle = io.StringIO( self.graph.serialize(format='turtle').decode()) attachment = MIMEText(graph_turtle.read())",
"\"journal_title\" in citation: em = soup.new_tag(\"em\") em.string = citation.get(\"journal_title\") col_2.append(em) if \"year\" in",
"email_server.close() def __add_article__(self, work_iri, work_form): \"\"\"Article specific data added to creative work Args:",
"issue_number and len(issue_number ) > 0: span = soup.new_tag(\"span\") span.string = \" no.",
"rdflib.RDFS.label, rdflib.Literal(label, lang=\"en\"))) email = form.get(\"email\") profile.graph.add((person_iri, SCHEMA.email.rdflib, rdflib.Literal(email))) add_qualified_generation(profile.graph, person_iri, generated_by) dept_year",
"to creative work Args: work_iri(rdflib.URIRef): Creative Work IRI for Article work_form(Flask.request.form): Dict of",
"self.person_iri = kwargs.get(\"person\") self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.profile = kwargs.get(\"profile\") self.scholarship_repo",
"rdflib.BNode() self.graph.add((work_iri, SCHEMA.partOf, book_bnode)) self.__add_book__(book_bnode, work_form) elif citation_type.startswith(\"book\"): self.__add_book__(work_iri, work_form) else: abort(500) if",
"generated_by = kwargs.get(\"generated_by\") work_form = kwargs.get(\"work_form\") BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema sparql",
"message, graph.serialize(format='turtle'), git_graph.sha) def update_all(self, person_label, action=\"Add\", connection=None): self.__save_graph__( git_repo=self.tiger_repo, file_path=\"/KnowledgeGraph/cc-people.ttl\", graph_name=\"cc_people\", message=\"{}",
"page_end and len(page_end) > 0: span = soup.new_tag(\"span\") if \"page_start\" in citation: page_string",
"if work_form.month.data != None: self.graph.add((work_iri, CITE.month, rdflib.Literal(work_form.month.data))) def __add_book__(self, work, work_form): self.graph.add((work, rdflib.RDF.type,",
"iri = citation.get(\"iri\") if iri: edit_click = \"editCitation('{}');\".format(iri) delete_click = \"deleteCitation('{}');\".format(iri) edit_a =",
"config: Configuration includes logins and administor \"\"\" work_graph = kwargs.get(\"graph\") config = kwargs.get(\"config\")",
"= rdflib.Namespace(\"http://www.w3.org/ns/prov#\") SCHEMA = rdflib.Namespace(\"http://schema.org/\") class GitProfile(object): def __init__(self, config): self.graph_hashes = {}",
"message, graph.serialize(format='turtle'), git_graph.sha) def __update_fast_subjects__(self): existing_subjects, new_subjects = set(), set() existing_stmt = self.research_statements.value(",
"= kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') author = kwargs.get(\"author\") connection = config_manager.conns iri =",
"citation.get(\"ENTRYTYPE\") if citation_type.startswith(\"article\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-file-alt\"})) elif citation_type.endswith(\"book\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-book\"}))",
"= soup.new_tag(\"span\") if \"page_start\" in citation: page_string = \"- {}.\" else: page_string =",
"College's Scholarship Graph\".format( label, person_iri) profile.new(message) def delete_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile =",
"'attachment', filename='work.ttl') message.attach(attachment) #try: server = smtplib.SMTP(config.get('EMAIL')['host'], config.get('EMAIL')['port']) server.ehlo() if config.get('EMAIL')['tls']: server.starttls() server.ehlo()",
"generate_citation_html(citation): soup = BeautifulSoup(\"\", 'lxml') div = soup.new_tag(\"div\", **{\"class\": \"row\"}) col_1 = soup.new_tag(\"div\",",
"self.graph.add( (work, SCHEMA.description, rdflib.Literal(work_form.notes.data))) def __populate_work__(self, work_form, generated_by=None): \"\"\"Populates graph with new work",
"latest changes in each repository for directory in data_upload: os.chdir(directory) result = subprocess.run(['git',",
"in work_form and len(work_form.author.data) > 0: self.person_iri = rdflib.URIRef(work_form.author.data) self.graph.add((work_iri, SCHEMA.author, self.person_iri)) elif",
"add_qualified_generation, add_qualified_revision BF = rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\") CITE = rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\") PROV = rdflib.Namespace(\"http://www.w3.org/ns/prov#\") SCHEMA =",
"latest RDF for current academic year # and CC people now = datetime.datetime.utcnow()",
"add_qualified_generation(profile.graph, statement_iri, generated_by) form_subjects = form.getlist(\"subjects\") new_subjects = {} for row in form_subjects:",
"[] for row in config_mgr.get(\"CONNECTIONS\"): if row.get(\"name\").startswith(\"datastore\"): for directory_row in row.get(\"data_upload\"): data_upload.append(directory_row[1]) #",
"rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) profile.update(msg) return {\"message\": msg, \"status\": True}",
"\"\"\"Adds a profile stub to scholarship graph\"\"\" config = kwargs.get(\"config\") current_user = kwargs.get(\"current_user\")",
"def new(self, message): \"\"\"Adds a new profile\"\"\" self.__send_email__(\"Add new profile\", message) def update(self,",
"added to creative work Args: work_iri(rdflib.URIRef): Creative Work IRI for Article work_form(Flask.request.form): Dict",
"col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-book\"})) under_review = soup.new_tag(\"em\") under_review.string = \"In Review\" col_1.append(under_review) div.append(col_1)",
"current_user.data.get('mail').lower()) email_results = connection.datastore.query(sparql) if len(email_results) > 0: generated_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) work_iri",
"work_form, generated_by=None): \"\"\"Populates graph with new work Args: form(Flask.request.form): Dict of form values",
"?periodical schema:name ?name . FILTER(CONTAINS(?name, \"{0}\")) }}\"\"\".format(label)) if result and len(result) > 0:",
"self.graph_hashes[\"cc_people\"] = hashlib.sha1( self.cc_people.serialize(format='n3')).hexdigest() self.graph_hashes[\"current_year\"] = hashlib.sha1( self.current_year.serialize(format='n3')).hexdigest() # Start retrieving and parsing",
"if row.get(\"name\").startswith(\"datastore\"): for directory_row in row.get(\"data_upload\"): data_upload.append(directory_row[1]) # Pull in the latest changes",
"for r in self.recipients]) message[\"Subject\"] = subject email_server = smtplib.SMTP( self.email.get(\"host\"), self.email.get(\"port\")) email_server.ehlo()",
"work_form.isbn.data is not None: self.graph.add((work, SCHEMA.isbn, rdflib.Literal(work_form.isbn.data))) if work_form.editionStatement.data is not None: self.graph.add(",
"abort(500) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) return work_iri def add(self, work_form,",
"to research statements and fast subjects self.research_statements.add((existing_stmt, SCHEMA.about, subject)) self.fast_subjects.add((subject, rdflib.RDF.type, BF.Topic)) subject_label",
"person_iri) profile.new(message) def delete_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user = kwargs.get(\"current_user\")",
"for {}\".format( action, person_label)) self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") self.__save_graph__(",
"citation_type.startswith(\"book chapter\"): self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter)) book_bnode = rdflib.BNode() self.graph.add((work_iri, SCHEMA.partOf, book_bnode)) self.__add_book__(book_bnode, work_form)",
"soup.new_tag(\"span\") span.string = \"({0})\".format(citation.get(\"year\")) col_2.append(span) vol_number = citation.get(\"volume_number\") if vol_number and len(vol_number) >",
"Statement for {}\".format(label), lang=\"en\"))) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\")))",
"graph = getattr(self, graph_name) message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) git_graph = getattr(self, \"{}_git\".format(graph_name))",
"if branch: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def",
"rdflib.URIRef( email_results[0].get(\"person\").get('value')) temp_work = rdflib.Graph() temp_work.namespace_manager.bind(\"cite\", rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")) for prefix, namespace in git_profile.cc_people.namespaces(): temp_work.namespace_manager.bind(prefix,",
"**{\"class\": \"btn btn-danger\", \"onclick\": delete_click, \"type=\": \"input\"}) delete_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-trash-alt\"})) col_3.append(delete_a) div.append(col_3)",
"BF) self.graph.namespace_manager.bind(\"cite\", CITE) self.graph.namespace_manager.bind(\"schema\", SCHEMA) self.graph.namespace_manager.bind(\"prov\", PROV) self.email = config.get(\"EMAIL\") self.recipients = config.get(\"ADMINS\")",
"self.fast_subjects.add((subject, rdflib.RDFS.label, subject_label)) def __update_research_statements__(self): existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) current_description = self.research_statements.value(",
"btn-warning disabled\", \"onclick\": edit_click, \"type=\": \"input\"}) edit_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-edit\"})) col_3.append(edit_a) delete_a =",
"rdflib.RDF.type, BF.Person.rdflib)) given_name = form.get(\"given_name\") if given_name is not None: profile.graph.add( (person_iri, SCHEMA.givenName.rdflib,",
"work has been submitted for review\", \"status\": True} def update_profile(**kwargs): \"\"\"Updates existing triples",
"start_year = now.year - 1 end_year = now.year else: start_year = now.year end_year",
"form.get(\"research_stmt\")) if statement is not None: statement_iri = rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type,",
"for Scholarship App\"\"\" __author__ = \"<NAME>\" import base64 import bibcat import datetime import",
"SUBJECTS_IRI, RESEARCH_STMT_IRI from .sparql import add_qualified_generation, add_qualified_revision BF = rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\") CITE = rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")",
"and current_user_email: sparql = EMAIL_LOOKUP.format( current_user_email.lower()) email_results = connection.datastore.query(sparql) if len(email_results) > 0:",
"message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def update_all(self, person_label, action=\"Add\",",
"MIMEText(work_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='work.ttl') message.attach(attachment) #try: server = smtplib.SMTP(config.get('EMAIL')['host'], config.get('EMAIL')['port']) server.ehlo() if config.get('EMAIL')['tls']:",
"0: self.person_iri = rdflib.URIRef(work_form.author.data) self.graph.add((work_iri, SCHEMA.author, self.person_iri)) elif generated_by: self.person_iri = generated_by self.graph.add((work_iri,",
"**kwargs): git_repo = kwargs.get(\"git_repo\") file_path = kwargs.get(\"file_path\") graph_name = kwargs.get(\"graph_name\") branch = kwargs.get(\"branch\")",
"blob = repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) return raw_turtle def __save_graph__(self, **kwargs): git_repo =",
"if existing_label is None: profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\")))",
"existing_label = profile.fast_subjects.value( subject=iri_subject, predicate=rdflib.RDFS.label) if existing_label is None: profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib))",
"if work_form.editionStatement.data is not None: self.graph.add( (work, SCHEMA.editionStatement, rdflib.Literal(work_form.editionStatement.data))) if work_form.editor.data is not",
"{}'s academic profile:\\n\".format( generated_by, form['label']) statement_iri_results = connection.datastore.query( RESEARCH_STMT_IRI.format( person_iri)) if len(statement_iri_results) >",
"EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI from .sparql import add_qualified_generation, add_qualified_revision BF = rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\") CITE =",
"def __email_work__(**kwargs): \"\"\"Function takes a work graph and configuration and emails the graph",
"= MIMEMultipart() message[\"From\"] = sender message[\"Subject\"] = subject message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r",
"= cc_github.get_organization(\"Tutt-Library\") self.statement_msg = kwargs.get(\"msg\") self.person_iri = kwargs.get(\"person\") self.research_statements = rdflib.Graph() self.fast_subjects =",
"fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) new_subjects[fast_uri] = fast_label for fast_subject,",
"__update_research_statements__(self): existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) current_description = self.research_statements.value( subject=existing_stmt, predicate=SCHEMA.description) new_description =",
"self.__add_book__(work_iri, work_form) else: abort(500) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) return work_iri",
"recipients, message.as_string()) server.close() def generate_citation_html(citation): soup = BeautifulSoup(\"\", 'lxml') div = soup.new_tag(\"div\", **{\"class\":",
"else: span = soup.new_tag(\"span\") span.string = name col_2.append(span) if \"journal_title\" in citation: em",
"graph = getattr(self, graph_name) graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest() if graph_sha1 == self.graph_hashes[graph_name]: return git_graph",
"soup.new_tag(\"em\") under_review.string = \"In Review\" col_1.append(under_review) div.append(col_1) col_2 = soup.new_tag(\"div\", **{\"class\": \"col-7\"}) if",
"SCHEMA) self.graph.namespace_manager.bind(\"prov\", PROV) self.email = config.get(\"EMAIL\") self.recipients = config.get(\"ADMINS\") self.person_iri = person_iri def",
"self.current_year.serialize(format='n3')).hexdigest() # Start retrieving and parsing latest RDF for creative works, # research",
"= name col_2.append(work_link) else: span = soup.new_tag(\"span\") span.string = name col_2.append(span) if \"journal_title\"",
"message) def __email_work__(**kwargs): \"\"\"Function takes a work graph and configuration and emails the",
"citation = utilities.Book_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_book() citation.add_book() if revised_by: add_qualified_revision(temp_work, rdflib.URIRef(citation.iri),",
"journal = rdflib.BNode() self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical)) self.graph.add((journal, SCHEMA.name, rdflib.Literal(work_form.journal_title.data))) issue, volume = None,",
"directory_row in row.get(\"data_upload\"): data_upload.append(directory_row[1]) # Pull in the latest changes in each repository",
"fa-book\"})) under_review = soup.new_tag(\"em\") under_review.string = \"In Review\" col_1.append(under_review) div.append(col_1) col_2 = soup.new_tag(\"div\",",
"= connection.datastore.query(\"\"\"SELECT ?periodical WHERE {{ ?periodical schema:name ?name . FILTER(CONTAINS(?name, \"{0}\")) }}\"\"\".format(label)) if",
"statement_iri = rdflib.URIRef( statement_iri_results[0].get(\"iri\").get(\"value\")) add_qualified_revision(profile.graph, statement_iri, generated_by) else: statement_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1())) profile.graph.add(",
"Args: form(Flask.request.form): Dict of form values \"\"\" if len(work_form.iri.data) > 0: work_iri =",
"rdflib.RDF.type, SCHEMA.PublicationIssue)) self.graph.add((issue, SCHEMA.issueNumber, rdflib.Literal(work_form.issue_number.data))) if volume is not None: self.graph.add((issue, SCHEMA.partOf, volume))",
"rdflib.URIRef(profile.add(work_form, generated_by)) #profile.update(\"Added or Updated Creative Work\") return {\"message\": \"New work has been",
"generated_by) else: statement_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib,",
"work_form) else: abort(500) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) return work_iri def",
"break volume, issue = None, None volume_or_issue = work_graph.value(predicate=SCHEMA.partOf, object=entity) schema_class = work_graph.value(subject=volume_or_issue,",
"config=self.config, msg=message, person=self.person_iri, profile=self) BACKGROUND_THREAD.start() self.__send_email__(\"Updating Profile\", message) def __email_work__(**kwargs): \"\"\"Function takes a",
"SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) form_subjects = form.getlist(\"subjects\") new_subjects = {} for row in form_subjects:",
"self.research_statements.serialize(format='n3')).hexdigest() self.graph_hashes[\"fast_subjects\"] = hashlib.sha1( self.fast_subjects.serialize(format='n3')).hexdigest() def __get_content__(self, repo_name, content): raw_turtle = None try:",
"current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') connection = config_manager.conns generated_by = kwargs.get(\"generated_by\") work_form",
"= citation.get(\"title\") if \"url\" in citation: work_link = soup.new_tag(\"a\", href=citation.get(\"url\")) work_link.string = name",
"email_body += \"\\n{}:\\t{}\".format(row, field.data) self.__send_email__(\"Added New Work\", email_body) return work_iri def new(self, message):",
"parsing latest RDF for current academic year # and CC people now =",
"self.graph_hashes[\"fast_subjects\"] = hashlib.sha1( self.fast_subjects.serialize(format='n3')).hexdigest() def __get_content__(self, repo_name, content): raw_turtle = None try: raw_turtle",
"CITE) self.graph.namespace_manager.bind(\"schema\", SCHEMA) self.graph.namespace_manager.bind(\"prov\", PROV) self.email = config.get(\"EMAIL\") self.recipients = config.get(\"ADMINS\") self.person_iri =",
"as last resort self.graph.add((work_iri, SCHEMA.partOf, journal)) if work_form.month.data != None: self.graph.add((work_iri, CITE.month, rdflib.Literal(work_form.month.data)))",
"\"fas fa-file-alt\"})) elif citation_type.endswith(\"book\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-book\"})) under_review = soup.new_tag(\"em\") under_review.string =",
"new_subjects = set(), set() existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) for row in self.research_statements.objects(",
"\"input\"}) delete_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-trash-alt\"})) col_3.append(delete_a) div.append(col_3) return div.prettify() def __reconcile_article__(work_graph, connection): SCHEMA",
"self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): raw_turtle = self.__get_content__(\"scholarship_repo\", content) if content.name.startswith(\"cc-research-statements\"):",
"args: work_graph(rdflib.Graph): RDF Graph of Citation config: Configuration includes logins and administor \"\"\"",
"current academic year # and CC people now = datetime.datetime.utcnow() if now.month <",
"name = citation.get(\"article_title\") elif \"title\" in citation: name = citation.get(\"title\") if \"url\" in",
"App\"\"\" __author__ = \"<NAME>\" import base64 import bibcat import datetime import hashlib import",
"people now = datetime.datetime.utcnow() if now.month < 7: start_year = now.year - 1",
"{}\".format(graph_name)) graph = getattr(self, graph_name) graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest() if graph_sha1 == self.graph_hashes[graph_name]: return",
"> 0: new_issue = rdflib.URIRef(result[0].get(\"issue\").get(\"value\")) bibcat.replace_iri(work_graph, issue, new_issue) def add_creative_work(**kwargs): \"\"\"Calls utilities to",
"server.ehlo() server.login(sender, config.get(\"EMAIL\")[\"password\"]) recipients = list(set(recipients)) # Quick dedup server.sendmail(sender, recipients, message.as_string()) server.close()",
"= rdflib.BNode() self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume)) self.graph.add((volume, SCHEMA.volumeNumber, rdflib.Literal(work_form.volume_number.data))) self.graph.add((volume, SCHEMA.partOf, journal)) if work_form.issue_number.data",
"page_string = \"- {}.\" else: page_string = \"{}.\" span.string = page_string.format(page_end) col_2.append(span) div.append(col_2)",
"citation.get(\"iri\") if iri: edit_click = \"editCitation('{}');\".format(iri) delete_click = \"deleteCitation('{}');\".format(iri) edit_a = soup.new_tag(\"a\", **{\"class\":",
"\"<NAME>\" import base64 import bibcat import datetime import hashlib import io import os",
"}} OPTIONAL {{ ?issue schema:partOf ?periodical . }} BIND(<{0}> as ?volume) BIND(<{1}> as",
"span = soup.new_tag(\"span\") span.string = \" no. {}\".format(issue_number) col_2.append(span) page_start = citation.get(\"page_start\") if",
"> 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = None form = kwargs.get(\"form\") if",
"content self.cc_people.parse(data=raw_turtle, format='turtle') self.graph_hashes[\"cc_people\"] = hashlib.sha1( self.cc_people.serialize(format='n3')).hexdigest() self.graph_hashes[\"current_year\"] = hashlib.sha1( self.current_year.serialize(format='n3')).hexdigest() # Start",
"generated_by = None form = kwargs.get(\"form\") if form.get(\"orcid\"): person_uri = form.get(\"orcid\") else: person_uri",
"SCHEMA.partOf, volume)) else: # Add work_iri to Journal as last resort self.graph.add((work_iri, SCHEMA.partOf,",
"subject)) for subject in list(new_subjects.difference(existing_subjects)): # Add new subject to research statements and",
"kwargs.get(\"config\") current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') profile = EmailProfile(config) connection = config_manager.conns",
"edit_click = \"editCitation('{}');\".format(iri) delete_click = \"deleteCitation('{}');\".format(iri) edit_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-warning disabled\",",
"add_qualified_generation(profile.graph, person_iri, generated_by) dept_year = kwargs.get(\"year-iri\") if dept_year is not None: dept_year_iri =",
". FILTER(CONTAINS(?name, \"{0}\")) }}\"\"\".format(label)) if result and len(result) > 0: periodical = result[0].get(\"periodical\").get(\"value\")",
"by {} on {}\".format( iri, author, current_user.data.get('mail'), datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Deletion of",
"list(existing_subjects.difference(new_subjects)): self.research_statements.remove((existing_stmt, SCHEMA.about, subject)) for subject in list(new_subjects.difference(existing_subjects)): # Add new subject to",
"profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) message = \"New {} as {} to Colorado",
"Creative Work {}'.format(citation.iri) __email_work__(graph=temp_work, config=config, carbon_copy=[current_user_email,], subject=email_subject, text=\"Edited {} revised by {} on",
"and len(vol_number) > 0: span = soup.new_tag(\"span\") span.string = \"v. {}\".format(vol_number) col_2.append(span) issue_number",
"len(email_results) > 0: generated_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) work_iri = rdflib.URIRef(profile.add(work_form, generated_by)) #profile.update(\"Added or",
"CC People\".format(action, person_label)) self.__save_graph__( git_repo=self.tiger_repo, file_path=self.current_year_path, graph_name=\"current_year\", message=\"{} person to Department for school",
"data added to creative work Args: work_iri(rdflib.URIRef): Creative Work IRI for Article work_form(Flask.request.form):",
"if periodical != str(entity): new_work = rdflib.URIRef(periodical) bibcat.replace_iri(work_graph, entity, new_work) entity = new_work",
"self.__save_graph__( file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") class EmailProfile(object): \"\"\"Simple Email Profile class",
"import os import pprint import smtplib import subprocess import threading import uuid from",
"takes a work graph and configuration and emails the graph in turtle format",
"Function iterates and commits any changes to self.__update_fast_subjects__() self.__update_research_statements__() self.__save_graph__( file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=self.statement_msg)",
"fa-file-alt\"})) elif citation_type.endswith(\"book\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-book\"})) under_review = soup.new_tag(\"em\") under_review.string = \"In",
"import pprint import smtplib import subprocess import threading import uuid from email.mime.multipart import",
"content self.creative_works.parse( data=raw_turtle, format='turtle') self.graph_hashes[\"creative_works\"] = hashlib.sha1( self.creative_works.serialize(format='n3')).hexdigest() self.graph_hashes[\"research_statements\"] = hashlib.sha1( self.research_statements.serialize(format='n3')).hexdigest() self.graph_hashes[\"fast_subjects\"]",
"connection: self.__reload_triplestore__(connection) def __reload_triplestore__(self, config_mgr): data_upload = [] for row in config_mgr.get(\"CONNECTIONS\"): if",
"journal)) if work_form.issue_number.data != None: issue = rdflib.BNode() self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue)) self.graph.add((issue, SCHEMA.issueNumber,",
"> 0: span = soup.new_tag(\"span\") span.string = \"p. {}\".format(page_start) col_2.append(span) page_end = citation.get(\"page_end\")",
"temp_work, git_profile.cc_people, False) citation.populate() citation.populate_book() citation.add_book() if revised_by: add_qualified_revision(temp_work, rdflib.URIRef(citation.iri), revised_by) email_subject =",
"is None and current_user_email: sparql = EMAIL_LOOKUP.format( current_user_email.lower()) email_results = connection.datastore.query(sparql) if len(email_results)",
"kwargs.get('config_manager') connection = config_manager.conns revised_by = kwargs.get(\"revised_by\") raw_citation = kwargs.get(\"citation\") work_type = kwargs.get(\"work_type\",",
"= work_graph.value(subject=issue, predicate=SCHEMA.issueNumber) result = connection.datastore.query(\"\"\"SELECT ?issue WHERE {{ ?issue rdf:type schema:issueNumber ;",
"0: statement_iri = rdflib.URIRef( statement_iri_results[0].get(\"iri\").get(\"value\")) add_qualified_revision(profile.graph, statement_iri, generated_by) else: statement_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1()))",
"dedup email_server.sendmail(self.email.get(\"user\"), recipients, message.as_string()) email_server.close() def __add_article__(self, work_iri, work_form): \"\"\"Article specific data added",
"generated_by: add_qualified_generation(self.graph, work_iri, generated_by) citation_type = work_form.citation_type.data self.graph.add((work_iri, CITE.citationType, rdflib.Literal(citation_type))) if \"author\" in",
"github import Github, GithubException import utilities from .sparql import EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI from",
"form_subjects: fast_id, fast_label = row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri =",
"Colorado College's Scholarship Graph\".format( label, person_iri) profile.new(message) def delete_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile",
"str(current_description) != str(new_description): self.research_statements.remove((existing_stmt, SCHEMA.description, current_description)) self.research_statements.replace('\"','\\\"') self.research_statements.add((existing_stmt, SCHEMA.description, new_description)) def run(self): #",
"person_label)) self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/creative-works.ttl\",",
"profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) message = \"New {}",
"email_server.ehlo() if self.email.get(\"tls\"): email_server.starttls() body = MIMEText(body, _charset=\"UTF-8\") message.attach(body) graph_turtle = io.StringIO( self.graph.serialize(format='turtle').decode())",
"review\", \"status\": True, \"iri\": work_iri} def add_profile(**kwargs): \"\"\"Adds a profile stub to scholarship",
"fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) iri_subject = rdflib.URIRef(fast_uri) profile.graph.add( (statement_iri, SCHEMA.about.rdflib,",
"try: raw_turtle = content.decoded_content except GithubException: repo = getattr(self, repo_name) blob = repo.get_git_blob(content.sha)",
"= rdflib.URIRef( statement_iri_results[0].get(\"iri\").get(\"value\")) add_qualified_revision(profile.graph, statement_iri, generated_by) else: statement_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1())) profile.graph.add( (statement_iri,",
"work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) return work_iri def add(self, work_form, generated_by=None): work_iri",
"config=config, carbon_copy=[current_user_email,], subject=email_subject, text=\"Edited {} revised by {} on {}, see attached RDF",
"self.__get_content__(\"tiger_repo\", content) if content.name.startswith(self.current_year_path.split(\"/\")[-1]): self.current_year_git = content self.current_year.parse(data=raw_turtle, format='turtle') if content.name.startswith(\"cc-people\"): self.cc_people_git =",
"SCHEMA = rdflib.Namespace(\"http://schema.org/\") for row in work_graph.query( \"\"\"SELECT ?entity ?label WHERE { ?entity",
"added\") self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/creative-works.ttl\", graph_name=\"creative_works\", message=\"Creative Works added\") if connection: self.__reload_triplestore__(connection) def",
"= soup.new_tag(\"div\", **{\"class\": \"col-1\"}) citation_type = citation.get(\"ENTRYTYPE\") if citation_type.startswith(\"article\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-file-alt\"}))",
"branch = kwargs.get(\"branch\") graph_name = kwargs.get(\"graph_name\") graph = getattr(self, graph_name) message = kwargs.get(\"message\",",
"administrators with attached profile graph\"\"\" message = MIMEMultipart() message[\"From\"] = self.email.get(\"user\") message[\"To\"] =",
"is not None: profile.graph.add( (person_iri, SCHEMA.givenName.rdflib, rdflib.Literal(given_name, lang=\"en\"))) family_name = form.get(\"family_name\") if family_name",
"0: revised_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) temp_work = rdflib.Graph() temp_work.namespace_manager.bind(\"cite\", rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")) for prefix, namespace",
"now.year end_year = now.year + 1 self.current_year_path = \"/KnowledgeGraph/cc-{0}-{1}.ttl\".format( start_year, end_year) self.current_year =",
"= content self.fast_subjects.parse( data=raw_turtle, format='turtle') def __save_graph__(self, **kwargs): file_path = kwargs.get(\"file_path\") branch =",
"= rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\") CITE = rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\") PROV = rdflib.Namespace(\"http://www.w3.org/ns/prov#\") SCHEMA = rdflib.Namespace(\"http://schema.org/\") class GitProfile(object):",
"\"{}.\" span.string = page_string.format(page_end) col_2.append(span) div.append(col_2) col_3 = soup.new_tag(\"div\", **{\"class\": \"col-4\"}) iri =",
"= sender message[\"Subject\"] = subject message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in recipients]) if",
"added\") class EmailProfile(object): \"\"\"Simple Email Profile class that creates a local RDF graph",
"\"col-4\"}) iri = citation.get(\"iri\") if iri: edit_click = \"editCitation('{}');\".format(iri) delete_click = \"deleteCitation('{}');\".format(iri) edit_a",
"before adding to production. Keyword args: work_graph(rdflib.Graph): RDF Graph of Citation config: Configuration",
"utilities.Book_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_book() citation.add_book() if revised_by: add_qualified_revision(temp_work, rdflib.URIRef(citation.iri), revised_by) email_subject",
"BIND(\"{1}\" as ?volumeNumber) }}\"\"\".format(entity, vol_num)) if result and len(result) > 0: new_volume =",
"import utilities from .sparql import EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI from .sparql import add_qualified_generation, add_qualified_revision",
"new_volume = rdflib.URIRef(result[0].get(\"volume\").get(\"value\")) bibcat.replace_iri(work_graph, volume, new_volume) if issue is not None: issue_number =",
"been submitted for review\", \"status\": True} def update_profile(**kwargs): \"\"\"Updates existing triples based on",
"= rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) self.graph.add((work_iri, SCHEMA.dataPublished, rdflib.Literal(work_form.datePublished.data))) self.graph.add((work_iri, CITE.authorString, rdflib.Literal(work_form.author_string.data))) if generated_by: add_qualified_generation(self.graph,",
"git_graph.sha, branch=branch) else: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def __update_fast_subjects__(self): existing_subjects, new_subjects = set(),",
"btn-danger\", \"onclick\": delete_click, \"type=\": \"input\"}) delete_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-trash-alt\"})) col_3.append(delete_a) div.append(col_3) return div.prettify()",
"values\"\"\" config_manager = kwargs.get('config_manager') connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema",
"in data_upload: os.chdir(directory) result = subprocess.run(['git', 'pull', 'origin', 'master']) click.echo(result.returncode, result.stdout) config_mgr.conns.datastore.mgr.reset() class",
"None: self.graph.add((work_iri, SCHEMA.partOf, volume)) else: # Add work_iri to Journal as last resort",
"generated_by=None): work_iri = self.__populate_work__(work_form, generated_by) email_body = \"Properties and Values for Creative Work",
"rdflib.URIRef(work_form.iri.data) else: # Mint IRI for new work if \"doi\" in work_form and",
"edit_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-edit\"})) col_3.append(edit_a) delete_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-danger\", \"onclick\": delete_click,",
"generated_by=None): \"\"\"Populates graph with new work Args: form(Flask.request.form): Dict of form values \"\"\"",
"= \"In Review\" col_1.append(under_review) div.append(col_1) col_2 = soup.new_tag(\"div\", **{\"class\": \"col-7\"}) if \"article_title\" in",
"; schema:name ?label . } \"\"\"): entity, label = row break volume, issue",
"= row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) iri_subject =",
"0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = person_iri msg = \"{} made the",
"if self.email.get(\"tls\"): email_server.starttls() body = MIMEText(body, _charset=\"UTF-8\") message.attach(body) graph_turtle = io.StringIO( self.graph.serialize(format='turtle').decode()) attachment",
"\"\"\"Profiles for Scholarship App\"\"\" __author__ = \"<NAME>\" import base64 import bibcat import datetime",
"work_form.provisionActivityStatement.data is not None: self.graph.add( (work, SCHEMA.provisionActivityStatement, rdflib.Literal(work_form.provisionActivityStatement.data))) if work_form.notes.data is not None:",
"self.statement_msg = kwargs.get(\"msg\") self.person_iri = kwargs.get(\"person\") self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.profile",
"if \"author\" in work_form and len(work_form.author.data) > 0: self.person_iri = rdflib.URIRef(work_form.author.data) self.graph.add((work_iri, SCHEMA.author,",
"or Updated Creative Work\") return {\"message\": \"New work has been submitted for review\",",
"as ?volumeNumber) }}\"\"\".format(entity, vol_num)) if result and len(result) > 0: new_volume = rdflib.URIRef(result[0].get(\"volume\").get(\"value\"))",
"if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') def __save_graph__(self, **kwargs): file_path =",
"= MIMEMultipart() message[\"From\"] = self.email.get(\"user\") message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in self.recipients]) message[\"Subject\"]",
"file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=self.statement_msg) self.__save_graph__( file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") class EmailProfile(object): \"\"\"Simple",
"if content.name.startswith(\"cc-people\"): self.cc_people_git = content self.cc_people.parse(data=raw_turtle, format='turtle') self.graph_hashes[\"cc_people\"] = hashlib.sha1( self.cc_people.serialize(format='n3')).hexdigest() self.graph_hashes[\"current_year\"] =",
"msg=message, person=self.person_iri, profile=self) BACKGROUND_THREAD.start() self.__send_email__(\"Updating Profile\", message) def __email_work__(**kwargs): \"\"\"Function takes a work",
"if config.get('EMAIL')['tls']: server.starttls() server.ehlo() server.login(sender, config.get(\"EMAIL\")[\"password\"]) recipients = list(set(recipients)) # Quick dedup server.sendmail(sender,",
"is not None: self.graph.add( (work, SCHEMA.editionStatement, rdflib.Literal(work_form.editionStatement.data))) if work_form.editor.data is not None: self.graph.add((work,",
"attachment = MIMEText(graph_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='profile.ttl') message.attach(attachment) email_server.login( self.email.get(\"user\"), self.email.get(\"password\")) recipients = list(set(self.recipients))",
"self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume)) self.graph.add((volume, SCHEMA.volumeNumber, rdflib.Literal(work_form.volume_number.data))) self.graph.add((volume, SCHEMA.partOf, journal)) if work_form.issue_number.data != None:",
"rdflib.Literal(work_form.author_string.data))) if generated_by: add_qualified_generation(self.graph, work_iri, generated_by) citation_type = work_form.citation_type.data self.graph.add((work_iri, CITE.citationType, rdflib.Literal(citation_type))) if",
"Values for Creative Work {}\".format(work_iri) for row in work_form._fields: if row.startswith(\"csrf_token\"): continue field",
"self.__reload_triplestore__(connection) def __reload_triplestore__(self, config_mgr): data_upload = [] for row in config_mgr.get(\"CONNECTIONS\"): if row.get(\"name\").startswith(\"datastore\"):",
"work_type.startswith(\"book\"): citation = utilities.Book_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_book() citation.add_book() if revised_by: add_qualified_revision(temp_work,",
"None: self.graph.add( (work, SCHEMA.editionStatement, rdflib.Literal(work_form.editionStatement.data))) if work_form.editor.data is not None: self.graph.add((work, SCHEMA.editor, rdflib.Literal(work_form.editor.data)))",
"work_graph = kwargs.get(\"graph\") config = kwargs.get(\"config\") sender = config.get('EMAIL')['user'] recipients = config.get(\"ADMINS\") subject",
"content.name.startswith(\"creative-works\"): self.creative_works_git = content self.creative_works.parse( data=raw_turtle, format='turtle') self.graph_hashes[\"creative_works\"] = hashlib.sha1( self.creative_works.serialize(format='n3')).hexdigest() self.graph_hashes[\"research_statements\"] =",
"in self.scholarship_repo.get_dir_contents(\"/data/\"): raw_turtle = self.__get_content__(\"scholarship_repo\", content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle,",
"BACKGROUND_THREAD = ProfileUpdateThread( config=self.config, msg=message, person=self.person_iri, profile=self) BACKGROUND_THREAD.start() self.__send_email__(\"Updating Profile\", message) def __email_work__(**kwargs):",
"email_results = connection.datastore.query(sparql) if len(email_results) > 0: revised_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) temp_work =",
"self.fast_subjects.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"creative-works\"): self.creative_works_git = content self.creative_works.parse( data=raw_turtle, format='turtle') self.graph_hashes[\"creative_works\"] =",
"\"\"\"Edits existing profile\"\"\" global BACKGROUND_THREAD BACKGROUND_THREAD = ProfileUpdateThread( config=self.config, msg=message, person=self.person_iri, profile=self) BACKGROUND_THREAD.start()",
"profile graph\"\"\" message = MIMEMultipart() message[\"From\"] = self.email.get(\"user\") message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r",
"row in self.research_statements.objects( subject=existing_stmt, predicate=SCHEMA.about): existing_subjects.add(row) for fast_heading in self.profile.graph.objects( subject=existing_stmt, predicate=SCHEMA.about): new_subjects.add(fast_heading)",
"def __update_research_statements__(self): existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) current_description = self.research_statements.value( subject=existing_stmt, predicate=SCHEMA.description) new_description",
"predicate=SCHEMA.accountablePerson, object=self.person_iri) current_description = self.research_statements.value( subject=existing_stmt, predicate=SCHEMA.description) new_description = self.profile.graph.value( subject=existing_stmt, predicate=SCHEMA.description) if",
"self.current_year_path = \"/KnowledgeGraph/cc-{0}-{1}.ttl\".format( start_year, end_year) self.current_year = rdflib.Graph() self.cc_people = rdflib.Graph() self.tiger_repo =",
"= subprocess.run(['git', 'pull', 'origin', 'master']) click.echo(result.returncode, result.stdout) config_mgr.conns.datastore.mgr.reset() class ProfileUpdateThread(threading.Thread): def __init__(self, **kwargs):",
"self.email.get(\"user\") message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in self.recipients]) message[\"Subject\"] = subject email_server =",
"existing profile\"\"\" global BACKGROUND_THREAD BACKGROUND_THREAD = ProfileUpdateThread( config=self.config, msg=message, person=self.person_iri, profile=self) BACKGROUND_THREAD.start() self.__send_email__(\"Updating",
"else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) new_subjects[fast_uri] = fast_label for fast_subject, fast_label in new_subjects.items(): iri_subject",
"generated_by) dept_year = kwargs.get(\"year-iri\") if dept_year is not None: dept_year_iri = rdflib.URIRef(dept_year_iri) title",
"message[\"Subject\"] = subject message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in recipients]) if len(carbon_copy) >",
"= \"<NAME>\" import base64 import bibcat import datetime import hashlib import io import",
"{}.\" else: page_string = \"{}.\" span.string = page_string.format(page_end) col_2.append(span) div.append(col_2) col_3 = soup.new_tag(\"div\",",
"self.graph.add((work_iri, SCHEMA.partOf, journal)) if work_form.month.data != None: self.graph.add((work_iri, CITE.month, rdflib.Literal(work_form.month.data))) def __add_book__(self, work,",
"content self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') if",
"work_type.startswith(\"article\"): citation = utilities.Article_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_article() citation.add_article() elif work_type.startswith(\"book\"): citation",
"self.current_year = rdflib.Graph() self.cc_people = rdflib.Graph() self.tiger_repo = self.tutt_github.get_repo(\"tiger-catalog\") for content in self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"):",
"for subject in list(existing_subjects.difference(new_subjects)): self.research_statements.remove((existing_stmt, SCHEMA.about, subject)) for subject in list(new_subjects.difference(existing_subjects)): # Add",
"self.graph.add((volume, SCHEMA.partOf, journal)) if work_form.issue_number.data != None: issue = rdflib.BNode() self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue))",
"= kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') profile = EmailProfile(config) connection = config_manager.conns BF =",
"= fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) new_subjects[fast_uri] = fast_label for fast_subject, fast_label in",
"subject, body): \"\"\"Sends email to administrators with attached profile graph\"\"\" message = MIMEMultipart()",
"SCHEMA.familyName.rdflib, rdflib.Literal(family_name, lang=\"en\"))) label = \"{} {}\".format(given_name, family_name) profile.graph.add((person_iri, rdflib.RDFS.label, rdflib.Literal(label, lang=\"en\"))) email",
"citation_type.startswith(\"article\"): self.__add_article__(work_iri, work_form) elif citation_type.startswith(\"book chapter\"): self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter)) book_bnode = rdflib.BNode() self.graph.add((work_iri,",
"{\"message\": \"New work has been submitted for review\", \"status\": True, \"iri\": work_iri} def",
"self.creative_works.serialize(format='n3')).hexdigest() self.graph_hashes[\"research_statements\"] = hashlib.sha1( self.research_statements.serialize(format='n3')).hexdigest() self.graph_hashes[\"fast_subjects\"] = hashlib.sha1( self.fast_subjects.serialize(format='n3')).hexdigest() def __get_content__(self, repo_name, content):",
"= person_iri msg = \"{} made the following changes to {}'s academic profile:\\n\".format(",
"connection.datastore.query(\"\"\"SELECT ?periodical WHERE {{ ?periodical schema:name ?name . FILTER(CONTAINS(?name, \"{0}\")) }}\"\"\".format(label)) if result",
"SCHEMA.PublicationIssue)) self.graph.add((issue, SCHEMA.issueNumber, rdflib.Literal(work_form.issue_number.data))) if volume is not None: self.graph.add((issue, SCHEMA.partOf, volume)) else:",
"def __get_content__(self, repo_name, content): raw_turtle = None try: raw_turtle = content.decoded_content except GithubException:",
"SCHEMA = config_manager.nsm.schema sparql = EMAIL_LOOKUP.format( current_user.data.get('mail').lower()) email_results = connection.datastore.query(sparql) if len(email_results) >",
"issue = work_graph.value(predicate=SCHEMA.partOf, object=volume) elif schema_class is SCHEMA.issueNumber: issue = volume_or_issue result =",
"return raw_turtle def __save_graph__(self, **kwargs): git_repo = kwargs.get(\"git_repo\") file_path = kwargs.get(\"file_path\") graph_name =",
"profile.graph.add( (person_iri, SCHEMA.givenName.rdflib, rdflib.Literal(given_name, lang=\"en\"))) family_name = form.get(\"family_name\") if family_name is not None:",
"None, None if work_form.volume_number.data != None: volume = rdflib.BNode() self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume)) self.graph.add((volume,",
"raw_turtle = None try: raw_turtle = content.decoded_content except GithubException: repo = getattr(self, repo_name)",
"new_subjects[fast_uri] = fast_label for fast_subject, fast_label in new_subjects.items(): iri_subject = rdflib.URIRef(fast_subject) profile.graph.add( (statement_iri,",
"(statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) form_subjects = form.getlist(\"subjects\") new_subjects = {} for row in",
"rdflib.Literal(family_name, lang=\"en\"))) label = \"{} {}\".format(given_name, family_name) profile.graph.add((person_iri, rdflib.RDFS.label, rdflib.Literal(label, lang=\"en\"))) email =",
"if work_form.provisionActivityStatement.data is not None: self.graph.add( (work, SCHEMA.provisionActivityStatement, rdflib.Literal(work_form.provisionActivityStatement.data))) if work_form.notes.data is not",
"?label . } \"\"\"): entity, label = row break volume, issue = None,",
"None: profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) message = \"New",
"if content.name.startswith(self.current_year_path.split(\"/\")[-1]): self.current_year_git = content self.current_year.parse(data=raw_turtle, format='turtle') if content.name.startswith(\"cc-people\"): self.cc_people_git = content self.cc_people.parse(data=raw_turtle,",
"__init__(self, **kwargs): threading.Thread.__init__(self) config = kwargs.get(\"config\") cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.tutt_github = cc_github.get_organization(\"Tutt-Library\")",
"is send via email to the Administrators for review.\"\"\" def __init__(self, config, person_iri):",
"profile\", message) def update(self, message): \"\"\"Edits existing profile\"\"\" global BACKGROUND_THREAD BACKGROUND_THREAD = ProfileUpdateThread(",
"scholarship graph\"\"\" config = kwargs.get(\"config\") current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') profile =",
"= connection.datastore.query(sparql) if len(email_results) > 0: revised_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) temp_work = rdflib.Graph()",
"= kwargs.get(\"author\") connection = config_manager.conns iri = kwargs.get(\"iri\") __email_work__( config=config, carbon_copy=[current_user.data.get('mail'),], subject=\"Delete Request\",",
"citation {} for {}\\nrequested by {} on {}\".format( iri, author, current_user.data.get('mail'), datetime.datetime.utcnow().isoformat()) )",
"made the following changes to {}'s academic profile:\\n\".format( generated_by, form['label']) statement_iri_results = connection.datastore.query(",
"rdflib.URIRef(result[0].get(\"volume\").get(\"value\")) bibcat.replace_iri(work_graph, volume, new_volume) if issue is not None: issue_number = work_graph.value(subject=issue, predicate=SCHEMA.issueNumber)",
"self.__save_graph__( git_repo=self.scholarship_repo, file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=\"{} Research Statement for {}\".format( action, person_label)) self.__save_graph__( git_repo=self.scholarship_repo,",
"= content.decoded_content except GithubException: repo = getattr(self, repo_name) blob = repo.get_git_blob(content.sha) raw_turtle =",
"span.string = \"({0})\".format(citation.get(\"year\")) col_2.append(span) vol_number = citation.get(\"volume_number\") if vol_number and len(vol_number) > 0:",
"self.graph.add((issue, SCHEMA.partOf, volume)) else: self.graph.add((issue, SCHEMA.partOf, journal)) self.graph.add((work_iri, SCHEMA.partOf, issue)) elif volume is",
"for Creative Work {}\".format(work_iri) for row in work_form._fields: if row.startswith(\"csrf_token\"): continue field =",
"?issue schema:partOf ?periodical . }} BIND(<{0}> as ?volume) BIND(<{1}> as ?periodical) BIND(\"{2}\" as",
"Start retrieving and parsing latest RDF for current academic year # and CC",
"(work, SCHEMA.provisionActivityStatement, rdflib.Literal(work_form.provisionActivityStatement.data))) if work_form.notes.data is not None: self.graph.add( (work, SCHEMA.description, rdflib.Literal(work_form.notes.data))) def",
"profile or editing profile that is send via email to the Administrators for",
"format='turtle') if content.name.startswith(\"cc-people\"): self.cc_people_git = content self.cc_people.parse(data=raw_turtle, format='turtle') self.graph_hashes[\"cc_people\"] = hashlib.sha1( self.cc_people.serialize(format='n3')).hexdigest() self.graph_hashes[\"current_year\"]",
"and len(issue_number ) > 0: span = soup.new_tag(\"span\") span.string = \" no. {}\".format(issue_number)",
"subject=existing_stmt, predicate=SCHEMA.description) if new_description is not None \\ and str(current_description) != str(new_description): self.research_statements.remove((existing_stmt,",
"form(Flask.request.form): Dict of form values \"\"\" if len(work_form.iri.data) > 0: work_iri = rdflib.URIRef(work_form.iri.data)",
"in citation: name = citation.get(\"title\") if \"url\" in citation: work_link = soup.new_tag(\"a\", href=citation.get(\"url\"))",
"def __init__(self, config, person_iri): self.config = config self.triplestore_url = self.config.get(\"TRIPLESTORE_URL\") self.graph = rdflib.Graph()",
"\"onclick\": edit_click, \"type=\": \"input\"}) edit_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-edit\"})) col_3.append(edit_a) delete_a = soup.new_tag(\"a\", **{\"class\":",
"stub to scholarship graph\"\"\" config = kwargs.get(\"config\") current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager')",
"statement_iri = rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement",
"citation: page_string = \"- {}.\" else: page_string = \"{}.\" span.string = page_string.format(page_end) col_2.append(span)",
"\"\"\"Sends email to administrators with attached profile graph\"\"\" message = MIMEMultipart() message[\"From\"] =",
"new_work = rdflib.URIRef(periodical) bibcat.replace_iri(work_graph, entity, new_work) entity = new_work if volume is not",
"person_label, action=\"Add\", connection=None): self.__save_graph__( git_repo=self.tiger_repo, file_path=\"/KnowledgeGraph/cc-people.ttl\", graph_name=\"cc_people\", message=\"{} {} to CC People\".format(action, person_label))",
"GitProfile(object): def __init__(self, config): self.graph_hashes = {} cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.triplestore_url =",
"person_iri)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {} {}\".format( form.get('given_name'), form.get('family_name')), lang=\"en\"))) add_qualified_generation(",
"= work_graph.value(subject=volume_or_issue, predicate=rdflib.RDF.type) if schema_class is SCHEMA.volumeNumber: volume = volume_or_issue issue = work_graph.value(predicate=SCHEMA.partOf,",
"and administor \"\"\" work_graph = kwargs.get(\"graph\") config = kwargs.get(\"config\") sender = config.get('EMAIL')['user'] recipients",
"is not None: self.graph.add((work, SCHEMA.isbn, rdflib.Literal(work_form.isbn.data))) if work_form.editionStatement.data is not None: self.graph.add( (work,",
"body = MIMEText(body, _charset=\"UTF-8\") message.attach(body) graph_turtle = io.StringIO( self.graph.serialize(format='turtle').decode()) attachment = MIMEText(graph_turtle.read()) attachment.add_header('Content-Disposition',",
"\"input\"}) edit_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-edit\"})) col_3.append(edit_a) delete_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-danger\", \"onclick\":",
"add_qualified_revision(profile.graph, statement_iri, generated_by) else: statement_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add(",
"# Function iterates and commits any changes to self.__update_fast_subjects__() self.__update_research_statements__() self.__save_graph__( file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\",",
"> 0: self.person_iri = rdflib.URIRef(work_form.author.data) self.graph.add((work_iri, SCHEMA.author, self.person_iri)) elif generated_by: self.person_iri = generated_by",
"new(self, message): \"\"\"Adds a new profile\"\"\" self.__send_email__(\"Add new profile\", message) def update(self, message):",
"if vol_number and len(vol_number) > 0: span = soup.new_tag(\"span\") span.string = \"v. {}\".format(vol_number)",
"not None: statement_iri = rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, rdflib.RDFS.label,",
"if dept_year is not None: dept_year_iri = rdflib.URIRef(dept_year_iri) title = kwargs.get(\"title-iri\") profile.graph.add( (dept_year_iri,",
"= content self.creative_works.parse( data=raw_turtle, format='turtle') self.graph_hashes[\"creative_works\"] = hashlib.sha1( self.creative_works.serialize(format='n3')).hexdigest() self.graph_hashes[\"research_statements\"] = hashlib.sha1( self.research_statements.serialize(format='n3')).hexdigest()",
"connection.datastore.query( RESEARCH_STMT_IRI.format( person_iri)) if len(statement_iri_results) > 0: statement_iri = rdflib.URIRef( statement_iri_results[0].get(\"iri\").get(\"value\")) add_qualified_revision(profile.graph, statement_iri,",
"generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = person_iri msg = \"{} made the following",
"entity = new_work if volume is not None: vol_num = work_graph.value(subject=volume, predicate=SCHEMA.volumeNumber) result",
"work_form = kwargs.get(\"work_form\") BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema sparql = EMAIL_LOOKUP.format( current_user.data.get('mail').lower())",
"person_iri): self.config = config self.triplestore_url = self.config.get(\"TRIPLESTORE_URL\") self.graph = rdflib.Graph() self.graph.namespace_manager.bind(\"bf\", BF) self.graph.namespace_manager.bind(\"cite\",",
"issue = volume_or_issue result = connection.datastore.query(\"\"\"SELECT ?periodical WHERE {{ ?periodical schema:name ?name .",
"if issue_number and len(issue_number ) > 0: span = soup.new_tag(\"span\") span.string = \"",
"form.getlist(\"citations\") for uri in citations: profile.graph.add( (rdflib.URIRef(uri), SCHEMA.author.rdflib, person_iri)) statement = form.get(\"research_stmt\") if",
"fast_label = row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) new_subjects[fast_uri]",
"self.cc_people_git = content self.cc_people.parse(data=raw_turtle, format='turtle') self.graph_hashes[\"cc_people\"] = hashlib.sha1( self.cc_people.serialize(format='n3')).hexdigest() self.graph_hashes[\"current_year\"] = hashlib.sha1( self.current_year.serialize(format='n3')).hexdigest()",
"format='turtle') if content.name.startswith(\"creative-works\"): self.creative_works_git = content self.creative_works.parse( data=raw_turtle, format='turtle') self.graph_hashes[\"creative_works\"] = hashlib.sha1( self.creative_works.serialize(format='n3')).hexdigest()",
"any changes to self.__update_fast_subjects__() self.__update_research_statements__() self.__save_graph__( file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=self.statement_msg) self.__save_graph__( file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\",",
"iterates and commits any changes to self.__update_fast_subjects__() self.__update_research_statements__() self.__save_graph__( file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=self.statement_msg) self.__save_graph__(",
"if form.get(\"orcid\"): person_uri = form.get(\"orcid\") else: person_uri = \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()) person_iri = rdflib.URIRef(person_uri)",
"new work Args: form(Flask.request.form): Dict of form values \"\"\" if len(work_form.iri.data) > 0:",
"None form = kwargs.get(\"form\") if form.get(\"orcid\"): person_uri = form.get(\"orcid\") else: person_uri = \"http://catalog.coloradocollege.edu/{}\".format(",
"recipients, message.as_string()) email_server.close() def __add_article__(self, work_iri, work_form): \"\"\"Article specific data added to creative",
"self.graph.serialize(format='turtle').decode()) attachment = MIMEText(graph_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='profile.ttl') message.attach(attachment) email_server.login( self.email.get(\"user\"), self.email.get(\"password\")) recipients =",
"= config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower()))",
"configuration and emails the graph in turtle format to the administrators for review",
"SCHEMA.name, rdflib.Literal(work_form.article_title.data))) if work_form.page_start.data !=None: self.graph.add((work_iri, SCHEMA.pageStart, rdflib.Literal(work_form.page_start.data))) if work_form.page_end.data !=None: self.graph.add((work_iri, SCHEMA.pageEnd,",
"SCHEMA.provisionActivityStatement, rdflib.Literal(work_form.provisionActivityStatement.data))) if work_form.notes.data is not None: self.graph.add( (work, SCHEMA.description, rdflib.Literal(work_form.notes.data))) def __populate_work__(self,",
"content): raw_turtle = None try: raw_turtle = content.decoded_content except GithubException: repo = getattr(self,",
"= citation.get(\"article_title\") elif \"title\" in citation: name = citation.get(\"title\") if \"url\" in citation:",
"0: work_iri = rdflib.URIRef(work_form.doi.data) else: work_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) self.graph.add((work_iri, SCHEMA.dataPublished, rdflib.Literal(work_form.datePublished.data)))",
"config.get(\"TRIPLESTORE_URL\") self.tutt_github = cc_github.get_organization(\"Tutt-Library\") # Start retrieving and parsing latest RDF for current",
"= kwargs.get(\"form\") if form.get(\"orcid\"): person_uri = form.get(\"orcid\") else: person_uri = \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()) person_iri",
"statement = form.get(\"research_stmt\") if len(statement) > 0: profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) form_subjects",
"fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) iri_subject = rdflib.URIRef(fast_uri) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) profile.graph.add(",
"for row in self.research_statements.objects( subject=existing_stmt, predicate=SCHEMA.about): existing_subjects.add(row) for fast_heading in self.profile.graph.objects( subject=existing_stmt, predicate=SCHEMA.about):",
"config.get(\"ADMINS\") subject = kwargs.get('subject') text = kwargs.get('text') carbon_copy = kwargs.get(\"carbon_copy\", []) message =",
"and len(work_form.author.data) > 0: self.person_iri = rdflib.URIRef(work_form.author.data) self.graph.add((work_iri, SCHEMA.author, self.person_iri)) elif generated_by: self.person_iri",
"subject_label is not None: self.fast_subjects.add((subject, rdflib.RDFS.label, subject_label)) def __update_research_statements__(self): existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson,",
"connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema form = kwargs.get('form') current_user",
"{} to Colorado College's Scholarship Graph\".format( label, person_iri) profile.new(message) def delete_creative_work(**kwargs): config =",
"see attached RDF turtle file\".format( citation.citation_type, revised_by, datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Changes to",
"and str(current_description) != str(new_description): self.research_statements.remove((existing_stmt, SCHEMA.description, current_description)) self.research_statements.replace('\"','\\\"') self.research_statements.add((existing_stmt, SCHEMA.description, new_description)) def run(self):",
"def __save_graph__(self, **kwargs): git_repo = kwargs.get(\"git_repo\") file_path = kwargs.get(\"file_path\") graph_name = kwargs.get(\"graph_name\") branch",
"\"type=\": \"input\"}) edit_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-edit\"})) col_3.append(edit_a) delete_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-danger\",",
"git_profile.cc_people.namespaces(): temp_work.namespace_manager.bind(prefix, namespace) if work_type.startswith(\"article\"): citation = utilities.Article_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_article()",
"= MIMEText(text, _charset=\"UTF-8\") message.attach(body) if work_graph: work_turtle = io.StringIO( work_graph.serialize(format='turtle').decode()) attachment = MIMEText(work_turtle.read())",
"else: statement_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri))",
"start_year = now.year end_year = now.year + 1 self.current_year_path = \"/KnowledgeGraph/cc-{0}-{1}.ttl\".format( start_year, end_year)",
"of Citation config: Configuration includes logins and administor \"\"\" work_graph = kwargs.get(\"graph\") config",
"periodical, issue_number) ) if result and len(result) > 0: new_issue = rdflib.URIRef(result[0].get(\"issue\").get(\"value\")) bibcat.replace_iri(work_graph,",
"form.get(\"research_stmt\") if len(statement) > 0: profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) form_subjects = form.getlist(\"subjects\")",
"set() existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) for row in self.research_statements.objects( subject=existing_stmt, predicate=SCHEMA.about): existing_subjects.add(row)",
"= kwargs.get(\"current_user_email\") config_manager = kwargs.get('config_manager') connection = config_manager.conns revised_by = kwargs.get(\"revised_by\") raw_citation =",
"'Edited Creative Work {}'.format(citation.iri) __email_work__(graph=temp_work, config=config, carbon_copy=[current_user_email,], subject=email_subject, text=\"Edited {} revised by {}",
"\"fas fa-book\"})) under_review = soup.new_tag(\"em\") under_review.string = \"In Review\" col_1.append(under_review) div.append(col_1) col_2 =",
"= rdflib.Graph() self.fast_subjects = rdflib.Graph() self.profile = kwargs.get(\"profile\") self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content",
"journal)) self.graph.add((work_iri, SCHEMA.partOf, issue)) elif volume is not None: self.graph.add((work_iri, SCHEMA.partOf, volume)) else:",
"= rdflib.Graph() self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): raw_turtle = self.__get_content__(\"scholarship_repo\", content)",
"{} on {}\".format( iri, author, current_user.data.get('mail'), datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Deletion of {}",
"self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) if citation_type.startswith(\"article\"): self.__add_article__(work_iri, work_form) elif citation_type.startswith(\"book chapter\"): self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter))",
"list(set(recipients)) # Quick dedup server.sendmail(sender, recipients, message.as_string()) server.close() def generate_citation_html(citation): soup = BeautifulSoup(\"\",",
"= None form = kwargs.get(\"form\") if form.get(\"orcid\"): person_uri = form.get(\"orcid\") else: person_uri =",
"person_iri)) profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) add_qualified_generation(profile.graph, statement_iri, generated_by) form_subjects = form.getlist(\"subjects\") new_subjects",
"message[\"From\"] = sender message[\"Subject\"] = subject message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in recipients])",
"config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') author",
"end_year) self.current_year = rdflib.Graph() self.cc_people = rdflib.Graph() self.tiger_repo = self.tutt_github.get_repo(\"tiger-catalog\") for content in",
"schema:name ?name . FILTER(CONTAINS(?name, \"{0}\")) }}\"\"\".format(label)) if result and len(result) > 0: periodical",
"=\"/data/creative-works.ttl\", graph_name=\"creative_works\", message=\"Creative Works added\") if connection: self.__reload_triplestore__(connection) def __reload_triplestore__(self, config_mgr): data_upload =",
"base64 import bibcat import datetime import hashlib import io import os import pprint",
"rdflib.Literal(work_form.month.data))) def __add_book__(self, work, work_form): self.graph.add((work, rdflib.RDF.type, SCHEMA.Book)) self.graph.add((work, SCHEMA.title, rdflib.Literal(work_form.book_title.data))) if work_form.isbn.data",
"= base64.b64decode(blob.content) return raw_turtle def __save_graph__(self, **kwargs): git_repo = kwargs.get(\"git_repo\") file_path = kwargs.get(\"file_path\")",
"and FAST subjects self.creative_works = rdflib.Graph() self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.scholarship_repo",
"directory in data_upload: os.chdir(directory) result = subprocess.run(['git', 'pull', 'origin', 'master']) click.echo(result.returncode, result.stdout) config_mgr.conns.datastore.mgr.reset()",
"!= None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) if citation_type.startswith(\"article\"): self.__add_article__(work_iri, work_form) elif citation_type.startswith(\"book chapter\"): self.graph.add((work_iri,",
"fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) new_subjects[fast_uri] = fast_label for fast_subject, fast_label in new_subjects.items():",
"self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def __update_fast_subjects__(self): existing_subjects, new_subjects = set(), set() existing_stmt =",
"person_iri msg = \"{} made the following changes to {}'s academic profile:\\n\".format( generated_by,",
"SCHEMA.partOf, issue)) elif volume is not None: self.graph.add((work_iri, SCHEMA.partOf, volume)) else: # Add",
"kwargs.get(\"work_type\", \"article\") if revised_by is None and current_user_email: sparql = EMAIL_LOOKUP.format( current_user_email.lower()) email_results",
"rdflib.URIRef( email_results[0].get(\"person\").get('value')) work_iri = rdflib.URIRef(profile.add(work_form, generated_by)) #profile.update(\"Added or Updated Creative Work\") return {\"message\":",
"turtle file\".format( citation.citation_type, revised_by, datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Changes to work has been",
"review\", \"status\": True} def update_profile(**kwargs): \"\"\"Updates existing triples based on form values\"\"\" config_manager",
"config_mgr): data_upload = [] for row in config_mgr.get(\"CONNECTIONS\"): if row.get(\"name\").startswith(\"datastore\"): for directory_row in",
"self.research_statements.add((existing_stmt, SCHEMA.about, subject)) self.fast_subjects.add((subject, rdflib.RDF.type, BF.Topic)) subject_label = self.profile.graph.value(subject=subject, predicate=rdflib.RDFS.label) if subject_label is",
"Work\") return {\"message\": \"New work has been submitted for review\", \"status\": True, \"iri\":",
"{}\".format(graph_name)) git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else:",
"self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') def __save_graph__(self, **kwargs): file_path = kwargs.get(\"file_path\") branch",
"issue)) elif volume is not None: self.graph.add((work_iri, SCHEMA.partOf, volume)) else: # Add work_iri",
"current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') profile = EmailProfile(config) connection = config_manager.conns BF",
"= {} for row in form_subjects: fast_id, fast_label = row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri",
"iri_subject = rdflib.URIRef(fast_subject) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) existing_label = profile.fast_subjects.value( subject=iri_subject, predicate=rdflib.RDFS.label) if",
"col_2.append(span) page_start = citation.get(\"page_start\") if page_start and len(page_start) > 0: span = soup.new_tag(\"span\")",
"(statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {}\".format(label), lang=\"en\"))) profile.graph.add( (statement_iri,",
"the Administrators for review.\"\"\" def __init__(self, config, person_iri): self.config = config self.triplestore_url =",
"author), \"status\": True} def edit_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user_email =",
"for school year\".format(action)) self.__save_graph__( git_repo=self.scholarship_repo, file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=\"{} Research Statement for {}\".format( action,",
"= kwargs.get('subject') text = kwargs.get('text') carbon_copy = kwargs.get(\"carbon_copy\", []) message = MIMEMultipart() message[\"From\"]",
"{} revised by {} on {}, see attached RDF turtle file\".format( citation.citation_type, revised_by,",
"is not None: issue_number = work_graph.value(subject=issue, predicate=SCHEMA.issueNumber) result = connection.datastore.query(\"\"\"SELECT ?issue WHERE {{",
"> 0: work_iri = rdflib.URIRef(work_form.iri.data) else: # Mint IRI for new work if",
"Profile class that creates a local RDF graph for new profile or editing",
"config.get(\"GITHUB_PWD\")) self.tutt_github = cc_github.get_organization(\"Tutt-Library\") self.statement_msg = kwargs.get(\"msg\") self.person_iri = kwargs.get(\"person\") self.research_statements = rdflib.Graph()",
"0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = None form = kwargs.get(\"form\") if form.get(\"orcid\"):",
"mimetypes import click import rdflib import requests from bs4 import BeautifulSoup from flask",
"rdflib.Literal(\"Research Statement for {}\".format(label), lang=\"en\"))) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement,",
"GithubException: blob = self.scholarship_repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse(",
"\"({0})\".format(citation.get(\"year\")) col_2.append(span) vol_number = citation.get(\"volume_number\") if vol_number and len(vol_number) > 0: span =",
"in work_form and len(work_form.url.data) > 0: self.graph.add((work_iri, SCHEMA.url, rdflib.URIRef(work_form.url.data))) if work_form.abstract.data != None:",
"profile = EmailProfile(config_manager, person_iri) msg = \"\" results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if",
"BF.Topic)) subject_label = self.profile.graph.value(subject=subject, predicate=rdflib.RDFS.label) if subject_label is not None: self.fast_subjects.add((subject, rdflib.RDFS.label, subject_label))",
"= form.getlist(\"subjects\") new_subjects = {} for row in form_subjects: fast_id, fast_label = row.split(\"==\")",
"creative work Args: work_iri(rdflib.URIRef): Creative Work IRI for Article work_form(Flask.request.form): Dict of form",
"= Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.triplestore_url = config.get(\"TRIPLESTORE_URL\") self.tutt_github = cc_github.get_organization(\"Tutt-Library\") # Start retrieving and",
"profile.graph, statement_iri, generated_by) citations = form.getlist(\"citations\") for uri in citations: profile.graph.add( (rdflib.URIRef(uri), SCHEMA.author.rdflib,",
"populate and save to datastore\"\"\" config = kwargs.get(\"config\") profile = EmailProfile(config) current_user =",
"kwargs.get(\"form\") if form.get(\"orcid\"): person_uri = form.get(\"orcid\") else: person_uri = \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()) person_iri =",
"now = datetime.datetime.utcnow() if now.month < 7: start_year = now.year - 1 end_year",
"__email_work__(**kwargs): \"\"\"Function takes a work graph and configuration and emails the graph in",
"if citation_type.startswith(\"article\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-file-alt\"})) elif citation_type.endswith(\"book\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-book\"})) under_review",
"= EmailProfile(config) connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema results =",
"generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = None form = kwargs.get(\"form\") if form.get(\"orcid\"): person_uri",
"work_graph.value(predicate=SCHEMA.partOf, object=volume) elif schema_class is SCHEMA.issueNumber: issue = volume_or_issue result = connection.datastore.query(\"\"\"SELECT ?periodical",
"the graph in turtle format to the administrators for review before adding to",
"filename='profile.ttl') message.attach(attachment) email_server.login( self.email.get(\"user\"), self.email.get(\"password\")) recipients = list(set(self.recipients)) # Quick dedup email_server.sendmail(self.email.get(\"user\"), recipients,",
"!= str(new_description): self.research_statements.remove((existing_stmt, SCHEMA.description, current_description)) self.research_statements.replace('\"','\\\"') self.research_statements.add((existing_stmt, SCHEMA.description, new_description)) def run(self): # Function",
"BIND(\"{2}\" as ?issue_number) }}\"\"\".format(volume, periodical, issue_number) ) if result and len(result) > 0:",
"save to datastore\"\"\" config = kwargs.get(\"config\") profile = EmailProfile(config) current_user = kwargs.get(\"current_user\") config_manager",
"if volume is not None: vol_num = work_graph.value(subject=volume, predicate=SCHEMA.volumeNumber) result = connection.datastore.query(\"\"\"SELECT ?volume",
"RESEARCH_STMT_IRI.format( person_iri)) if len(statement_iri_results) > 0: statement_iri = rdflib.URIRef( statement_iri_results[0].get(\"iri\").get(\"value\")) add_qualified_revision(profile.graph, statement_iri, generated_by)",
"\"Changes to work has been submitted for review\", \"status\": True} def update_profile(**kwargs): \"\"\"Updates",
"SCHEMA.about, rdflib.Literal(work_form.abstract.data))) if citation_type.startswith(\"article\"): self.__add_article__(work_iri, work_form) elif citation_type.startswith(\"book chapter\"): self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter)) book_bnode",
"kwargs.get('form') current_user = kwargs.get(\"current_user\") output = '' person_iri = rdflib.URIRef(form.get(\"iri\")) profile = EmailProfile(config_manager,",
"?volumeNumber . BIND(<{0}> as ?work) BIND(\"{1}\" as ?volumeNumber) }}\"\"\".format(entity, vol_num)) if result and",
"None: self.graph.add((issue, SCHEMA.partOf, volume)) else: self.graph.add((issue, SCHEMA.partOf, journal)) self.graph.add((work_iri, SCHEMA.partOf, issue)) elif volume",
"= \" no. {}\".format(issue_number) col_2.append(span) page_start = citation.get(\"page_start\") if page_start and len(page_start) >",
"self.research_statements.objects( subject=existing_stmt, predicate=SCHEMA.about): existing_subjects.add(row) for fast_heading in self.profile.graph.objects( subject=existing_stmt, predicate=SCHEMA.about): new_subjects.add(fast_heading) for subject",
"r in self.recipients]) message[\"Subject\"] = subject email_server = smtplib.SMTP( self.email.get(\"host\"), self.email.get(\"port\")) email_server.ehlo() if",
"for fast_subject, fast_label in new_subjects.items(): iri_subject = rdflib.URIRef(fast_subject) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) existing_label",
"rdflib.Graph() self.cc_people = rdflib.Graph() self.tiger_repo = self.tutt_github.get_repo(\"tiger-catalog\") for content in self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"): raw_turtle =",
"work_iri, work_form): \"\"\"Article specific data added to creative work Args: work_iri(rdflib.URIRef): Creative Work",
"(statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) add_qualified_generation(profile.graph, statement_iri, generated_by) form_subjects = form.getlist(\"subjects\") new_subjects = {}",
"self.graph_hashes[graph_name]: return git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch)",
"str(new_description): self.research_statements.remove((existing_stmt, SCHEMA.description, current_description)) self.research_statements.replace('\"','\\\"') self.research_statements.add((existing_stmt, SCHEMA.description, new_description)) def run(self): # Function iterates",
"= generated_by self.graph.add((work_iri, SCHEMA.author, generated_by)) if \"url\" in work_form and len(work_form.url.data) > 0:",
"False) citation.populate() citation.populate_book() citation.add_book() if revised_by: add_qualified_revision(temp_work, rdflib.URIRef(citation.iri), revised_by) email_subject = 'Edited Creative",
"lang=\"en\"))) email = form.get(\"email\") profile.graph.add((person_iri, SCHEMA.email.rdflib, rdflib.Literal(email))) add_qualified_generation(profile.graph, person_iri, generated_by) dept_year = kwargs.get(\"year-iri\")",
"graph_sha1 == self.graph_hashes[graph_name]: return git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: git_repo.update_file(file_path, message, graph.serialize(format='turtle'),",
"kwargs.get(\"branch\") graph_name = kwargs.get(\"graph_name\") graph = getattr(self, graph_name) message = kwargs.get(\"message\", \"Updating {}\".format(graph_name))",
"content.decoded_content except GithubException: repo = getattr(self, repo_name) blob = repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content)",
"config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if",
"= form.get(\"research_stmt\") if len(statement) > 0: profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) form_subjects =",
"family_name) profile.graph.add((person_iri, rdflib.RDFS.label, rdflib.Literal(label, lang=\"en\"))) email = form.get(\"email\") profile.graph.add((person_iri, SCHEMA.email.rdflib, rdflib.Literal(email))) add_qualified_generation(profile.graph, person_iri,",
"= \"/KnowledgeGraph/cc-{0}-{1}.ttl\".format( start_year, end_year) self.current_year = rdflib.Graph() self.cc_people = rdflib.Graph() self.tiger_repo = self.tutt_github.get_repo(\"tiger-catalog\")",
"self.graph_hashes = {} cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.triplestore_url = config.get(\"TRIPLESTORE_URL\") self.tutt_github = cc_github.get_organization(\"Tutt-Library\")",
"{}\".format(label), lang=\"en\"))) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) add_qualified_generation(profile.graph, statement_iri,",
"for new profile or editing profile that is send via email to the",
"click.echo(result.returncode, result.stdout) config_mgr.conns.datastore.mgr.reset() class ProfileUpdateThread(threading.Thread): def __init__(self, **kwargs): threading.Thread.__init__(self) config = kwargs.get(\"config\") cc_github",
"connection.datastore.query(sparql) if len(email_results) > 0: revised_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) temp_work = rdflib.Graph() temp_work.namespace_manager.bind(\"cite\",",
"self.scholarship_repo.get_dir_contents(\"/data/\"): try: raw_turtle = content.decoded_content except GithubException: blob = self.scholarship_repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content)",
"Research Statement for {}\".format( action, person_label)) self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject",
"rdflib.Literal(\"Research Statement for {} {}\".format( form.get('given_name'), form.get('family_name')), lang=\"en\"))) add_qualified_generation( profile.graph, statement_iri, generated_by) citations",
"if len(carbon_copy) > 0: message[\"Cc\"] = ','.join(carbon_copy) recipients.extend(carbon_copy) body = MIMEText(text, _charset=\"UTF-8\") message.attach(body)",
"profile.graph.add( (dept_year_iri, rdflib.URIRef(title), person_iri)) statement = kwargs.get(\"statement\", form.get(\"research_stmt\")) if statement is not None:",
"has been submitted for review\", \"status\": True} def update_profile(**kwargs): \"\"\"Updates existing triples based",
"for content in self.scholarship_repo.get_dir_contents(\"/data/\"): raw_turtle = self.__get_content__(\"scholarship_repo\", content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content",
"schema_class is SCHEMA.issueNumber: issue = volume_or_issue result = connection.datastore.query(\"\"\"SELECT ?periodical WHERE {{ ?periodical",
"profile stub to scholarship graph\"\"\" config = kwargs.get(\"config\") current_user = kwargs.get(\"current_user\") config_manager =",
"Works added\") if connection: self.__reload_triplestore__(connection) def __reload_triplestore__(self, config_mgr): data_upload = [] for row",
"in citation: name = citation.get(\"article_title\") elif \"title\" in citation: name = citation.get(\"title\") if",
"add_profile(**kwargs): \"\"\"Adds a profile stub to scholarship graph\"\"\" config = kwargs.get(\"config\") current_user =",
"import MIMEText import mimetypes import click import rdflib import requests from bs4 import",
"?periodical WHERE {{ ?periodical schema:name ?name . FILTER(CONTAINS(?name, \"{0}\")) }}\"\"\".format(label)) if result and",
"work_graph: work_turtle = io.StringIO( work_graph.serialize(format='turtle').decode()) attachment = MIMEText(work_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='work.ttl') message.attach(attachment) #try:",
"self.fast_subjects = rdflib.Graph() self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): raw_turtle = self.__get_content__(\"scholarship_repo\",",
"profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\")))",
"label = \"{} {}\".format(given_name, family_name) profile.graph.add((person_iri, rdflib.RDFS.label, rdflib.Literal(label, lang=\"en\"))) email = form.get(\"email\") profile.graph.add((person_iri,",
"kwargs.get(\"config\") profile = EmailProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') connection = config_manager.conns",
"if row.startswith(\"csrf_token\"): continue field = getattr(work_form, row) email_body += \"\\n{}:\\t{}\".format(row, field.data) self.__send_email__(\"Added New",
"work_form.issue_number.data != None: issue = rdflib.BNode() self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue)) self.graph.add((issue, SCHEMA.issueNumber, rdflib.Literal(work_form.issue_number.data))) if",
"add_qualified_revision BF = rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\") CITE = rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\") PROV = rdflib.Namespace(\"http://www.w3.org/ns/prov#\") SCHEMA = rdflib.Namespace(\"http://schema.org/\")",
"div.prettify() def __reconcile_article__(work_graph, connection): SCHEMA = rdflib.Namespace(\"http://schema.org/\") for row in work_graph.query( \"\"\"SELECT ?entity",
"connection.datastore.query(\"\"\"SELECT ?volume WHERE {{ ?volume schema:partOf ?work ; schema:volumeNumber ?volumeNumber . BIND(<{0}> as",
"BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema sparql = EMAIL_LOOKUP.format( current_user.data.get('mail').lower()) email_results = connection.datastore.query(sparql)",
"profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) existing_label = profile.fast_subjects.value( subject=iri_subject, predicate=rdflib.RDFS.label) if existing_label is None:",
"lang=\"en\"))) message = \"New {} as {} to Colorado College's Scholarship Graph\".format( label,",
"def __init__(self, config): self.graph_hashes = {} cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.triplestore_url = config.get(\"TRIPLESTORE_URL\")",
"Scholarship Graph\".format( label, person_iri) profile.new(message) def delete_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config)",
"= soup.new_tag(\"div\", **{\"class\": \"row\"}) col_1 = soup.new_tag(\"div\", **{\"class\": \"col-1\"}) citation_type = citation.get(\"ENTRYTYPE\") if",
"threading import uuid from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import mimetypes",
"return git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else:",
"in turtle format to the administrators for review before adding to production. Keyword",
"fast_subject, fast_label in new_subjects.items(): iri_subject = rdflib.URIRef(fast_subject) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) existing_label =",
"SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {} {}\".format( form.get('given_name'), form.get('family_name')), lang=\"en\")))",
"form = kwargs.get(\"form\") if form.get(\"orcid\"): person_uri = form.get(\"orcid\") else: person_uri = \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())",
"work_form and len(work_form.author.data) > 0: self.person_iri = rdflib.URIRef(work_form.author.data) self.graph.add((work_iri, SCHEMA.author, self.person_iri)) elif generated_by:",
"em.string = citation.get(\"journal_title\") col_2.append(em) if \"year\" in citation: span = soup.new_tag(\"span\") span.string =",
"config_manager = kwargs.get('config_manager') connection = config_manager.conns generated_by = kwargs.get(\"generated_by\") work_form = kwargs.get(\"work_form\") BF",
"current_description = self.research_statements.value( subject=existing_stmt, predicate=SCHEMA.description) new_description = self.profile.graph.value( subject=existing_stmt, predicate=SCHEMA.description) if new_description is",
"self.graph.namespace_manager.bind(\"cite\", CITE) self.graph.namespace_manager.bind(\"schema\", SCHEMA) self.graph.namespace_manager.bind(\"prov\", PROV) self.email = config.get(\"EMAIL\") self.recipients = config.get(\"ADMINS\") self.person_iri",
"datastore\"\"\" config = kwargs.get(\"config\") profile = EmailProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager')",
"work_iri def new(self, message): \"\"\"Adds a new profile\"\"\" self.__send_email__(\"Add new profile\", message) def",
"current_user.data.get('mail'), datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Deletion of {} for {} under review\".format( iri,",
"submitted for review\", \"status\": True} def update_profile(**kwargs): \"\"\"Updates existing triples based on form",
"if schema_class is SCHEMA.volumeNumber: volume = volume_or_issue issue = work_graph.value(predicate=SCHEMA.partOf, object=volume) elif schema_class",
"branch: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def update_all(self,",
"else: work_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) self.graph.add((work_iri, SCHEMA.dataPublished, rdflib.Literal(work_form.datePublished.data))) self.graph.add((work_iri, CITE.authorString, rdflib.Literal(work_form.author_string.data))) if",
"email_body) return work_iri def new(self, message): \"\"\"Adds a new profile\"\"\" self.__send_email__(\"Add new profile\",",
"(iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) profile.update(msg) return {\"message\": msg, \"status\":",
"if page_end and len(page_end) > 0: span = soup.new_tag(\"span\") if \"page_start\" in citation:",
"(statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {}\".format(label), lang=\"en\"))) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri,",
"volume)) else: # Add work_iri to Journal as last resort self.graph.add((work_iri, SCHEMA.partOf, journal))",
"if len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = person_iri msg =",
"is not None: vol_num = work_graph.value(subject=volume, predicate=SCHEMA.volumeNumber) result = connection.datastore.query(\"\"\"SELECT ?volume WHERE {{",
"temp_work = rdflib.Graph() temp_work.namespace_manager.bind(\"cite\", rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")) for prefix, namespace in git_profile.cc_people.namespaces(): temp_work.namespace_manager.bind(prefix, namespace) if",
"**{\"class\": \"fas fa-edit\"})) col_3.append(edit_a) delete_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-danger\", \"onclick\": delete_click, \"type=\":",
"self.graph = rdflib.Graph() self.graph.namespace_manager.bind(\"bf\", BF) self.graph.namespace_manager.bind(\"cite\", CITE) self.graph.namespace_manager.bind(\"schema\", SCHEMA) self.graph.namespace_manager.bind(\"prov\", PROV) self.email =",
"graph_name) graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest() if graph_sha1 == self.graph_hashes[graph_name]: return git_graph = getattr(self, \"{}_git\".format(graph_name))",
"def edit_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user_email = kwargs.get(\"current_user_email\") config_manager =",
"graph in turtle format to the administrators for review before adding to production.",
"SCHEMA.partOf, volume)) else: self.graph.add((issue, SCHEMA.partOf, journal)) self.graph.add((work_iri, SCHEMA.partOf, issue)) elif volume is not",
"message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def __update_fast_subjects__(self): existing_subjects, new_subjects",
"self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): try: raw_turtle = content.decoded_content except GithubException:",
"True} def edit_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user_email = kwargs.get(\"current_user_email\") config_manager",
"server.ehlo() if config.get('EMAIL')['tls']: server.starttls() server.ehlo() server.login(sender, config.get(\"EMAIL\")[\"password\"]) recipients = list(set(recipients)) # Quick dedup",
"= ProfileUpdateThread( config=self.config, msg=message, person=self.person_iri, profile=self) BACKGROUND_THREAD.start() self.__send_email__(\"Updating Profile\", message) def __email_work__(**kwargs): \"\"\"Function",
"{}\\nrequested by {} on {}\".format( iri, author, current_user.data.get('mail'), datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Deletion",
"self.__save_graph__( file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=self.statement_msg) self.__save_graph__( file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") class EmailProfile(object):",
"__init__(self, config): self.graph_hashes = {} cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.triplestore_url = config.get(\"TRIPLESTORE_URL\") self.tutt_github",
"citations = form.getlist(\"citations\") for uri in citations: profile.graph.add( (rdflib.URIRef(uri), SCHEMA.author.rdflib, person_iri)) statement =",
"= self.research_statements.value( subject=existing_stmt, predicate=SCHEMA.description) new_description = self.profile.graph.value( subject=existing_stmt, predicate=SCHEMA.description) if new_description is not",
"soup.new_tag(\"div\", **{\"class\": \"col-4\"}) iri = citation.get(\"iri\") if iri: edit_click = \"editCitation('{}');\".format(iri) delete_click =",
"of form values \"\"\" if len(work_form.iri.data) > 0: work_iri = rdflib.URIRef(work_form.iri.data) else: #",
"import hashlib import io import os import pprint import smtplib import subprocess import",
"= content self.fast_subjects.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"creative-works\"): self.creative_works_git = content self.creative_works.parse( data=raw_turtle, format='turtle')",
"= \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()) person_iri = rdflib.URIRef(person_uri) if generated_by is None: generated_by = person_iri",
"Citation config: Configuration includes logins and administor \"\"\" work_graph = kwargs.get(\"graph\") config =",
"global BACKGROUND_THREAD BACKGROUND_THREAD = ProfileUpdateThread( config=self.config, msg=message, person=self.person_iri, profile=self) BACKGROUND_THREAD.start() self.__send_email__(\"Updating Profile\", message)",
"\"p. {}\".format(page_start) col_2.append(span) page_end = citation.get(\"page_end\") if page_end and len(page_end) > 0: span",
"email_subject = 'Edited Creative Work {}'.format(citation.iri) __email_work__(graph=temp_work, config=config, carbon_copy=[current_user_email,], subject=email_subject, text=\"Edited {} revised",
"return work_iri def new(self, message): \"\"\"Adds a new profile\"\"\" self.__send_email__(\"Add new profile\", message)",
"0: new_issue = rdflib.URIRef(result[0].get(\"issue\").get(\"value\")) bibcat.replace_iri(work_graph, issue, new_issue) def add_creative_work(**kwargs): \"\"\"Calls utilities to populate",
"self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): raw_turtle = self.__get_content__(\"scholarship_repo\", content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git =",
"schema:Periodical ; schema:name ?label . } \"\"\"): entity, label = row break volume,",
"attached RDF turtle file\".format( citation.citation_type, revised_by, datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Changes to work",
"rdflib.Graph() self.tiger_repo = self.tutt_github.get_repo(\"tiger-catalog\") for content in self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"): raw_turtle = self.__get_content__(\"tiger_repo\", content) if",
"rdflib.Literal(fast_label, lang=\"en\"))) message = \"New {} as {} to Colorado College's Scholarship Graph\".format(",
"connection.datastore.query(sparql) if len(email_results) > 0: generated_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) work_iri = rdflib.URIRef(profile.add(work_form, generated_by))",
"span.string = \"p. {}\".format(page_start) col_2.append(span) page_end = citation.get(\"page_end\") if page_end and len(page_end) >",
"= kwargs.get(\"current_user\") output = '' person_iri = rdflib.URIRef(form.get(\"iri\")) profile = EmailProfile(config_manager, person_iri) msg",
"io.StringIO( self.graph.serialize(format='turtle').decode()) attachment = MIMEText(graph_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='profile.ttl') message.attach(attachment) email_server.login( self.email.get(\"user\"), self.email.get(\"password\")) recipients",
"with new work Args: form(Flask.request.form): Dict of form values \"\"\" if len(work_form.iri.data) >",
"profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {} {}\".format( form.get('given_name'),",
"col_2 = soup.new_tag(\"div\", **{\"class\": \"col-7\"}) if \"article_title\" in citation: name = citation.get(\"article_title\") elif",
"> 0: revised_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) temp_work = rdflib.Graph() temp_work.namespace_manager.bind(\"cite\", rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")) for prefix,",
"branch = kwargs.get(\"branch\") message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) graph = getattr(self, graph_name) graph_sha1",
"send via email to the Administrators for review.\"\"\" def __init__(self, config, person_iri): self.config",
"**{\"class\": \"fas fa-trash-alt\"})) col_3.append(delete_a) div.append(col_3) return div.prettify() def __reconcile_article__(work_graph, connection): SCHEMA = rdflib.Namespace(\"http://schema.org/\")",
"io import os import pprint import smtplib import subprocess import threading import uuid",
"book_bnode)) self.__add_book__(book_bnode, work_form) elif citation_type.startswith(\"book\"): self.__add_book__(work_iri, work_form) else: abort(500) if work_form.abstract.data != None:",
"data_upload = [] for row in config_mgr.get(\"CONNECTIONS\"): if row.get(\"name\").startswith(\"datastore\"): for directory_row in row.get(\"data_upload\"):",
"= config.get('EMAIL')['user'] recipients = config.get(\"ADMINS\") subject = kwargs.get('subject') text = kwargs.get('text') carbon_copy =",
"**kwargs): threading.Thread.__init__(self) config = kwargs.get(\"config\") cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.tutt_github = cc_github.get_organization(\"Tutt-Library\") self.statement_msg",
"= rdflib.BNode() self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical)) self.graph.add((journal, SCHEMA.name, rdflib.Literal(work_form.journal_title.data))) issue, volume = None, None",
"= soup.new_tag(\"span\") span.string = name col_2.append(span) if \"journal_title\" in citation: em = soup.new_tag(\"em\")",
"EmailProfile(config) connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema results = connection.datastore.query(",
"= rdflib.Graph() self.fast_subjects = rdflib.Graph() self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): raw_turtle",
"'origin', 'master']) click.echo(result.returncode, result.stdout) config_mgr.conns.datastore.mgr.reset() class ProfileUpdateThread(threading.Thread): def __init__(self, **kwargs): threading.Thread.__init__(self) config =",
"soup.new_tag(\"div\", **{\"class\": \"col-1\"}) citation_type = citation.get(\"ENTRYTYPE\") if citation_type.startswith(\"article\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-file-alt\"})) elif",
"self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def __update_fast_subjects__(self): existing_subjects,",
"#try: server = smtplib.SMTP(config.get('EMAIL')['host'], config.get('EMAIL')['port']) server.ehlo() if config.get('EMAIL')['tls']: server.starttls() server.ehlo() server.login(sender, config.get(\"EMAIL\")[\"password\"]) recipients",
"the administrators for review before adding to production. Keyword args: work_graph(rdflib.Graph): RDF Graph",
"self.fast_subjects.serialize(format='n3')).hexdigest() def __get_content__(self, repo_name, content): raw_turtle = None try: raw_turtle = content.decoded_content except",
"config_mgr.get(\"CONNECTIONS\"): if row.get(\"name\").startswith(\"datastore\"): for directory_row in row.get(\"data_upload\"): data_upload.append(directory_row[1]) # Pull in the latest",
"self.graph_hashes[\"creative_works\"] = hashlib.sha1( self.creative_works.serialize(format='n3')).hexdigest() self.graph_hashes[\"research_statements\"] = hashlib.sha1( self.research_statements.serialize(format='n3')).hexdigest() self.graph_hashes[\"fast_subjects\"] = hashlib.sha1( self.fast_subjects.serialize(format='n3')).hexdigest() def",
"config_manager = kwargs.get('config_manager') connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema form",
"and len(page_start) > 0: span = soup.new_tag(\"span\") span.string = \"p. {}\".format(page_start) col_2.append(span) page_end",
"fa-trash-alt\"})) col_3.append(delete_a) div.append(col_3) return div.prettify() def __reconcile_article__(work_graph, connection): SCHEMA = rdflib.Namespace(\"http://schema.org/\") for row",
"(statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {} {}\".format( form.get('given_name'), form.get('family_name')), lang=\"en\"))) add_qualified_generation( profile.graph, statement_iri,",
"rdflib.Literal(label, lang=\"en\"))) email = form.get(\"email\") profile.graph.add((person_iri, SCHEMA.email.rdflib, rdflib.Literal(email))) add_qualified_generation(profile.graph, person_iri, generated_by) dept_year =",
"email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import mimetypes import click import rdflib",
"for {} under review\".format( iri, author), \"status\": True} def edit_creative_work(**kwargs): config = kwargs.get(\"config\")",
"= rdflib.URIRef( email_results[0].get(\"person\").get('value')) temp_work = rdflib.Graph() temp_work.namespace_manager.bind(\"cite\", rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")) for prefix, namespace in git_profile.cc_people.namespaces():",
"self.__save_graph__( git_repo=self.tiger_repo, file_path=\"/KnowledgeGraph/cc-people.ttl\", graph_name=\"cc_people\", message=\"{} {} to CC People\".format(action, person_label)) self.__save_graph__( git_repo=self.tiger_repo, file_path=self.current_year_path,",
"?issue_number) }}\"\"\".format(volume, periodical, issue_number) ) if result and len(result) > 0: new_issue =",
"self.graph.add((work_iri, SCHEMA.url, rdflib.URIRef(work_form.url.data))) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) if citation_type.startswith(\"article\"): self.__add_article__(work_iri,",
"book_bnode = rdflib.BNode() self.graph.add((work_iri, SCHEMA.partOf, book_bnode)) self.__add_book__(book_bnode, work_form) elif citation_type.startswith(\"book\"): self.__add_book__(work_iri, work_form) else:",
"in self.research_statements.objects( subject=existing_stmt, predicate=SCHEMA.about): existing_subjects.add(row) for fast_heading in self.profile.graph.objects( subject=existing_stmt, predicate=SCHEMA.about): new_subjects.add(fast_heading) for",
"0: periodical = result[0].get(\"periodical\").get(\"value\") if periodical != str(entity): new_work = rdflib.URIRef(periodical) bibcat.replace_iri(work_graph, entity,",
"else: start_year = now.year end_year = now.year + 1 self.current_year_path = \"/KnowledgeGraph/cc-{0}-{1}.ttl\".format( start_year,",
"BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) message = \"New {} as {} to",
"(dept_year_iri, rdflib.URIRef(title), person_iri)) statement = kwargs.get(\"statement\", form.get(\"research_stmt\")) if statement is not None: statement_iri",
"Work\", email_body) return work_iri def new(self, message): \"\"\"Adds a new profile\"\"\" self.__send_email__(\"Add new",
"raw_turtle = self.__get_content__(\"scholarship_repo\", content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle') if",
"schema_class is SCHEMA.volumeNumber: volume = volume_or_issue issue = work_graph.value(predicate=SCHEMA.partOf, object=volume) elif schema_class is",
"kwargs.get(\"statement\", form.get(\"research_stmt\")) if statement is not None: statement_iri = rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) profile.graph.add( (statement_iri,",
"self.graph.add( (work, SCHEMA.provisionActivityStatement, rdflib.Literal(work_form.provisionActivityStatement.data))) if work_form.notes.data is not None: self.graph.add( (work, SCHEMA.description, rdflib.Literal(work_form.notes.data)))",
"= soup.new_tag(\"a\", **{\"class\": \"btn btn-danger\", \"onclick\": delete_click, \"type=\": \"input\"}) delete_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-trash-alt\"}))",
"schema:issueNumber ?issue_number . OPTIONAL {{ ?issue schema:partOf ?volume . }} OPTIONAL {{ ?issue",
"None: self.graph.add( (work, SCHEMA.description, rdflib.Literal(work_form.notes.data))) def __populate_work__(self, work_form, generated_by=None): \"\"\"Populates graph with new",
"requests from bs4 import BeautifulSoup from flask import current_app from github import Github,",
"?work ; schema:volumeNumber ?volumeNumber . BIND(<{0}> as ?work) BIND(\"{1}\" as ?volumeNumber) }}\"\"\".format(entity, vol_num))",
"= rdflib.BNode() self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue)) self.graph.add((issue, SCHEMA.issueNumber, rdflib.Literal(work_form.issue_number.data))) if volume is not None:",
"rdflib.RDF.type, BF.Topic)) subject_label = self.profile.graph.value(subject=subject, predicate=rdflib.RDFS.label) if subject_label is not None: self.fast_subjects.add((subject, rdflib.RDFS.label,",
"form values\"\"\" config_manager = kwargs.get('config_manager') connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA =",
"rdflib.Namespace(\"http://www.w3.org/ns/prov#\") SCHEMA = rdflib.Namespace(\"http://schema.org/\") class GitProfile(object): def __init__(self, config): self.graph_hashes = {} cc_github",
"git_repo=self.scholarship_repo, file_path =\"/data/creative-works.ttl\", graph_name=\"creative_works\", message=\"Creative Works added\") if connection: self.__reload_triplestore__(connection) def __reload_triplestore__(self, config_mgr):",
"profile:\\n\".format( generated_by, form['label']) statement_iri_results = connection.datastore.query( RESEARCH_STMT_IRI.format( person_iri)) if len(statement_iri_results) > 0: statement_iri",
"SCHEMA.dataPublished, rdflib.Literal(work_form.datePublished.data))) self.graph.add((work_iri, CITE.authorString, rdflib.Literal(work_form.author_string.data))) if generated_by: add_qualified_generation(self.graph, work_iri, generated_by) citation_type = work_form.citation_type.data",
"col_2.append(span) div.append(col_2) col_3 = soup.new_tag(\"div\", **{\"class\": \"col-4\"}) iri = citation.get(\"iri\") if iri: edit_click",
"> 0: generated_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) work_iri = rdflib.URIRef(profile.add(work_form, generated_by)) #profile.update(\"Added or Updated",
"page_start and len(page_start) > 0: span = soup.new_tag(\"span\") span.string = \"p. {}\".format(page_start) col_2.append(span)",
"git_profile.cc_people, False) citation.populate() citation.populate_book() citation.add_book() if revised_by: add_qualified_revision(temp_work, rdflib.URIRef(citation.iri), revised_by) email_subject = 'Edited",
"delete_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-danger\", \"onclick\": delete_click, \"type=\": \"input\"}) delete_a.append(soup.new_tag(\"i\", **{\"class\": \"fas",
"object=volume) elif schema_class is SCHEMA.issueNumber: issue = volume_or_issue result = connection.datastore.query(\"\"\"SELECT ?periodical WHERE",
"issue_number = work_graph.value(subject=issue, predicate=SCHEMA.issueNumber) result = connection.datastore.query(\"\"\"SELECT ?issue WHERE {{ ?issue rdf:type schema:issueNumber",
"rdflib.RDF.type, SCHEMA.Chapter)) book_bnode = rdflib.BNode() self.graph.add((work_iri, SCHEMA.partOf, book_bnode)) self.__add_book__(book_bnode, work_form) elif citation_type.startswith(\"book\"): self.__add_book__(work_iri,",
"\"iri\": work_iri} def add_profile(**kwargs): \"\"\"Adds a profile stub to scholarship graph\"\"\" config =",
"def add_profile(**kwargs): \"\"\"Adds a profile stub to scholarship graph\"\"\" config = kwargs.get(\"config\") current_user",
"RESEARCH_STMT_IRI from .sparql import add_qualified_generation, add_qualified_revision BF = rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\") CITE = rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\") PROV",
"= rdflib.URIRef(work_form.doi.data) else: work_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) self.graph.add((work_iri, SCHEMA.dataPublished, rdflib.Literal(work_form.datePublished.data))) self.graph.add((work_iri, CITE.authorString,",
"profile=self) BACKGROUND_THREAD.start() self.__send_email__(\"Updating Profile\", message) def __email_work__(**kwargs): \"\"\"Function takes a work graph and",
"for uri in citations: profile.graph.add( (rdflib.URIRef(uri), SCHEMA.author.rdflib, person_iri)) statement = form.get(\"research_stmt\") if len(statement)",
"in recipients]) if len(carbon_copy) > 0: message[\"Cc\"] = ','.join(carbon_copy) recipients.extend(carbon_copy) body = MIMEText(text,",
"if branch: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def",
"SCHEMA.Book)) self.graph.add((work, SCHEMA.title, rdflib.Literal(work_form.book_title.data))) if work_form.isbn.data is not None: self.graph.add((work, SCHEMA.isbn, rdflib.Literal(work_form.isbn.data))) if",
"result.stdout) config_mgr.conns.datastore.mgr.reset() class ProfileUpdateThread(threading.Thread): def __init__(self, **kwargs): threading.Thread.__init__(self) config = kwargs.get(\"config\") cc_github =",
"current_description)) self.research_statements.replace('\"','\\\"') self.research_statements.add((existing_stmt, SCHEMA.description, new_description)) def run(self): # Function iterates and commits any",
"from flask import current_app from github import Github, GithubException import utilities from .sparql",
"kwargs.get(\"iri\") __email_work__( config=config, carbon_copy=[current_user.data.get('mail'),], subject=\"Delete Request\", text=\"Delete citation {} for {}\\nrequested by {}",
"raw_citation = kwargs.get(\"citation\") work_type = kwargs.get(\"work_type\", \"article\") if revised_by is None and current_user_email:",
"new profile\", message) def update(self, message): \"\"\"Edits existing profile\"\"\" global BACKGROUND_THREAD BACKGROUND_THREAD =",
"= \"\" results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0: generated_by =",
"issue, volume = None, None if work_form.volume_number.data != None: volume = rdflib.BNode() self.graph.add((volume,",
"message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) graph = getattr(self, graph_name) graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest() if",
"return div.prettify() def __reconcile_article__(work_graph, connection): SCHEMA = rdflib.Namespace(\"http://schema.org/\") for row in work_graph.query( \"\"\"SELECT",
"BIND(<{1}> as ?periodical) BIND(\"{2}\" as ?issue_number) }}\"\"\".format(volume, periodical, issue_number) ) if result and",
"profile.graph.add((person_iri, SCHEMA.email.rdflib, rdflib.Literal(email))) add_qualified_generation(profile.graph, person_iri, generated_by) dept_year = kwargs.get(\"year-iri\") if dept_year is not",
"self.research_statements.replace('\"','\\\"') self.research_statements.add((existing_stmt, SCHEMA.description, new_description)) def run(self): # Function iterates and commits any changes",
"current_user = kwargs.get(\"current_user\") output = '' person_iri = rdflib.URIRef(form.get(\"iri\")) profile = EmailProfile(config_manager, person_iri)",
"Scholarship App\"\"\" __author__ = \"<NAME>\" import base64 import bibcat import datetime import hashlib",
"kwargs.get(\"person\") self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.profile = kwargs.get(\"profile\") self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\")",
"0: span = soup.new_tag(\"span\") span.string = \"v. {}\".format(vol_number) col_2.append(span) issue_number = citation.get(\"issue_number\") if",
"EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = None",
"not None \\ and str(current_description) != str(new_description): self.research_statements.remove((existing_stmt, SCHEMA.description, current_description)) self.research_statements.replace('\"','\\\"') self.research_statements.add((existing_stmt, SCHEMA.description,",
"= {} cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.triplestore_url = config.get(\"TRIPLESTORE_URL\") self.tutt_github = cc_github.get_organization(\"Tutt-Library\") #",
"or editing profile that is send via email to the Administrators for review.\"\"\"",
"soup.new_tag(\"div\", **{\"class\": \"col-7\"}) if \"article_title\" in citation: name = citation.get(\"article_title\") elif \"title\" in",
"elif volume is not None: self.graph.add((work_iri, SCHEMA.partOf, volume)) else: # Add work_iri to",
"= \"v. {}\".format(vol_number) col_2.append(span) issue_number = citation.get(\"issue_number\") if issue_number and len(issue_number ) >",
"= self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) for row in self.research_statements.objects( subject=existing_stmt, predicate=SCHEMA.about): existing_subjects.add(row) for fast_heading",
"schema:issueNumber ; schema:issueNumber ?issue_number . OPTIONAL {{ ?issue schema:partOf ?volume . }} OPTIONAL",
"= getattr(self, graph_name) message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) git_graph = getattr(self, \"{}_git\".format(graph_name)) if",
"> 0: self.graph.add((work_iri, SCHEMA.url, rdflib.URIRef(work_form.url.data))) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) if",
"citation.populate() citation.populate_article() citation.add_article() elif work_type.startswith(\"book\"): citation = utilities.Book_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_book()",
"hashlib.sha1(graph.serialize(format='n3')).hexdigest() if graph_sha1 == self.graph_hashes[graph_name]: return git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: git_repo.update_file(file_path,",
"self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle,",
"= rdflib.URIRef(fast_uri) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label,",
"content in self.scholarship_repo.get_dir_contents(\"/data/\"): try: raw_turtle = content.decoded_content except GithubException: blob = self.scholarship_repo.get_git_blob(content.sha) raw_turtle",
"predicate=rdflib.RDF.type) if schema_class is SCHEMA.volumeNumber: volume = volume_or_issue issue = work_graph.value(predicate=SCHEMA.partOf, object=volume) elif",
"span = soup.new_tag(\"span\") if \"page_start\" in citation: page_string = \"- {}.\" else: page_string",
"latest RDF for creative works, # research statements, and FAST subjects self.creative_works =",
"content in self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"): raw_turtle = self.__get_content__(\"tiger_repo\", content) if content.name.startswith(self.current_year_path.split(\"/\")[-1]): self.current_year_git = content self.current_year.parse(data=raw_turtle,",
"subject to research statements and fast subjects self.research_statements.add((existing_stmt, SCHEMA.about, subject)) self.fast_subjects.add((subject, rdflib.RDF.type, BF.Topic))",
"kwargs.get(\"message\", \"Updating {}\".format(graph_name)) git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha,",
"work_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) self.graph.add((work_iri, SCHEMA.dataPublished, rdflib.Literal(work_form.datePublished.data))) self.graph.add((work_iri, CITE.authorString, rdflib.Literal(work_form.author_string.data))) if generated_by:",
"SCHEMA.volumeNumber: volume = volume_or_issue issue = work_graph.value(predicate=SCHEMA.partOf, object=volume) elif schema_class is SCHEMA.issueNumber: issue",
"> 0: new_volume = rdflib.URIRef(result[0].get(\"volume\").get(\"value\")) bibcat.replace_iri(work_graph, volume, new_volume) if issue is not None:",
"is None: profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) message =",
"= config self.triplestore_url = self.config.get(\"TRIPLESTORE_URL\") self.graph = rdflib.Graph() self.graph.namespace_manager.bind(\"bf\", BF) self.graph.namespace_manager.bind(\"cite\", CITE) self.graph.namespace_manager.bind(\"schema\",",
"year\".format(action)) self.__save_graph__( git_repo=self.scholarship_repo, file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=\"{} Research Statement for {}\".format( action, person_label)) self.__save_graph__(",
"is None: generated_by = person_iri profile.graph.add( (person_iri, rdflib.RDF.type, BF.Person.rdflib)) given_name = form.get(\"given_name\") if",
"= kwargs.get(\"file_path\") graph_name = kwargs.get(\"graph_name\") branch = kwargs.get(\"branch\") message = kwargs.get(\"message\", \"Updating {}\".format(graph_name))",
"rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {}\".format(label),",
"config.get('EMAIL')['user'] recipients = config.get(\"ADMINS\") subject = kwargs.get('subject') text = kwargs.get('text') carbon_copy = kwargs.get(\"carbon_copy\",",
"uuid.uuid1())) self.graph.add((work_iri, SCHEMA.dataPublished, rdflib.Literal(work_form.datePublished.data))) self.graph.add((work_iri, CITE.authorString, rdflib.Literal(work_form.author_string.data))) if generated_by: add_qualified_generation(self.graph, work_iri, generated_by) citation_type",
"rdflib.Literal(given_name, lang=\"en\"))) family_name = form.get(\"family_name\") if family_name is not None: profile.graph.add((person_iri, SCHEMA.familyName.rdflib, rdflib.Literal(family_name,",
"BACKGROUND_THREAD BACKGROUND_THREAD = ProfileUpdateThread( config=self.config, msg=message, person=self.person_iri, profile=self) BACKGROUND_THREAD.start() self.__send_email__(\"Updating Profile\", message) def",
"sparql = EMAIL_LOOKUP.format( current_user_email.lower()) email_results = connection.datastore.query(sparql) if len(email_results) > 0: revised_by =",
"= kwargs.get(\"msg\") self.person_iri = kwargs.get(\"person\") self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.profile =",
"if work_graph: work_turtle = io.StringIO( work_graph.serialize(format='turtle').decode()) attachment = MIMEText(work_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='work.ttl') message.attach(attachment)",
"graph.serialize(format='turtle'), git_graph.sha) def __update_fast_subjects__(self): existing_subjects, new_subjects = set(), set() existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson,",
"len(page_end) > 0: span = soup.new_tag(\"span\") if \"page_start\" in citation: page_string = \"-",
"self.creative_works_git = content self.creative_works.parse( data=raw_turtle, format='turtle') self.graph_hashes[\"creative_works\"] = hashlib.sha1( self.creative_works.serialize(format='n3')).hexdigest() self.graph_hashes[\"research_statements\"] = hashlib.sha1(",
"if content.name.startswith(\"creative-works\"): self.creative_works_git = content self.creative_works.parse( data=raw_turtle, format='turtle') self.graph_hashes[\"creative_works\"] = hashlib.sha1( self.creative_works.serialize(format='n3')).hexdigest() self.graph_hashes[\"research_statements\"]",
"Configuration includes logins and administor \"\"\" work_graph = kwargs.get(\"graph\") config = kwargs.get(\"config\") sender",
"\"\"\"Article specific data added to creative work Args: work_iri(rdflib.URIRef): Creative Work IRI for",
"span = soup.new_tag(\"span\") span.string = \"p. {}\".format(page_start) col_2.append(span) page_end = citation.get(\"page_end\") if page_end",
"rdflib.RDF.type, SCHEMA.ScholarlyArticle)) self.graph.add((work_iri, SCHEMA.name, rdflib.Literal(work_form.article_title.data))) if work_form.page_start.data !=None: self.graph.add((work_iri, SCHEMA.pageStart, rdflib.Literal(work_form.page_start.data))) if work_form.page_end.data",
"carbon_copy=[current_user_email,], subject=email_subject, text=\"Edited {} revised by {} on {}, see attached RDF turtle",
"action, person_label)) self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") self.__save_graph__( git_repo=self.scholarship_repo, file_path",
"work_iri(rdflib.URIRef): Creative Work IRI for Article work_form(Flask.request.form): Dict of form values \"\"\" self.graph.add((work_iri,",
"= io.StringIO( self.graph.serialize(format='turtle').decode()) attachment = MIMEText(graph_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='profile.ttl') message.attach(attachment) email_server.login( self.email.get(\"user\"), self.email.get(\"password\"))",
"family_name = form.get(\"family_name\") if family_name is not None: profile.graph.add((person_iri, SCHEMA.familyName.rdflib, rdflib.Literal(family_name, lang=\"en\"))) label",
"span.string = \"v. {}\".format(vol_number) col_2.append(span) issue_number = citation.get(\"issue_number\") if issue_number and len(issue_number )",
"schema:partOf ?volume . }} OPTIONAL {{ ?issue schema:partOf ?periodical . }} BIND(<{0}> as",
"predicate=SCHEMA.issueNumber) result = connection.datastore.query(\"\"\"SELECT ?issue WHERE {{ ?issue rdf:type schema:issueNumber ; schema:issueNumber ?issue_number",
"new_work) entity = new_work if volume is not None: vol_num = work_graph.value(subject=volume, predicate=SCHEMA.volumeNumber)",
"submitted for review\", \"status\": True, \"iri\": work_iri} def add_profile(**kwargs): \"\"\"Adds a profile stub",
"= rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = None form = kwargs.get(\"form\") if form.get(\"orcid\"): person_uri =",
"= self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) current_description = self.research_statements.value( subject=existing_stmt, predicate=SCHEMA.description) new_description = self.profile.graph.value( subject=existing_stmt,",
"revised_by: add_qualified_revision(temp_work, rdflib.URIRef(citation.iri), revised_by) email_subject = 'Edited Creative Work {}'.format(citation.iri) __email_work__(graph=temp_work, config=config, carbon_copy=[current_user_email,],",
"= \"deleteCitation('{}');\".format(iri) edit_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-warning disabled\", \"onclick\": edit_click, \"type=\": \"input\"})",
"!=None: self.graph.add((work_iri, SCHEMA.pageStart, rdflib.Literal(work_form.page_start.data))) if work_form.page_end.data !=None: self.graph.add((work_iri, SCHEMA.pageEnd, rdflib.Literal(work_form.page_end.data))) journal = rdflib.BNode()",
"Keyword args: work_graph(rdflib.Graph): RDF Graph of Citation config: Configuration includes logins and administor",
"row.get(\"data_upload\"): data_upload.append(directory_row[1]) # Pull in the latest changes in each repository for directory",
"self.tutt_github.get_repo(\"tiger-catalog\") for content in self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"): raw_turtle = self.__get_content__(\"tiger_repo\", content) if content.name.startswith(self.current_year_path.split(\"/\")[-1]): self.current_year_git =",
"= kwargs.get('config_manager') connection = config_manager.conns generated_by = kwargs.get(\"generated_by\") work_form = kwargs.get(\"work_form\") BF =",
"**{\"class\": \"fas fa-file-alt\"})) elif citation_type.endswith(\"book\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-book\"})) under_review = soup.new_tag(\"em\") under_review.string",
"== self.graph_hashes[graph_name]: return git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha,",
"= rdflib.URIRef(profile.add(work_form, generated_by)) #profile.update(\"Added or Updated Creative Work\") return {\"message\": \"New work has",
"rdflib.URIRef(work_form.url.data))) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) if citation_type.startswith(\"article\"): self.__add_article__(work_iri, work_form) elif",
"for current academic year # and CC people now = datetime.datetime.utcnow() if now.month",
"repository for directory in data_upload: os.chdir(directory) result = subprocess.run(['git', 'pull', 'origin', 'master']) click.echo(result.returncode,",
"in config_mgr.get(\"CONNECTIONS\"): if row.get(\"name\").startswith(\"datastore\"): for directory_row in row.get(\"data_upload\"): data_upload.append(directory_row[1]) # Pull in the",
"content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse(",
"self.graph.add((issue, SCHEMA.issueNumber, rdflib.Literal(work_form.issue_number.data))) if volume is not None: self.graph.add((issue, SCHEMA.partOf, volume)) else: self.graph.add((issue,",
"sparql = EMAIL_LOOKUP.format( current_user.data.get('mail').lower()) email_results = connection.datastore.query(sparql) if len(email_results) > 0: generated_by =",
"None and current_user_email: sparql = EMAIL_LOOKUP.format( current_user_email.lower()) email_results = connection.datastore.query(sparql) if len(email_results) >",
"lang=\"en\"))) add_qualified_generation(profile.graph, statement_iri, generated_by) form_subjects = form.getlist(\"subjects\") new_subjects = {} for row in",
"message=\"{} {} to CC People\".format(action, person_label)) self.__save_graph__( git_repo=self.tiger_repo, file_path=self.current_year_path, graph_name=\"current_year\", message=\"{} person to",
"is not None: self.graph.add((work, SCHEMA.editor, rdflib.Literal(work_form.editor.data))) if work_form.provisionActivityStatement.data is not None: self.graph.add( (work,",
"editing profile that is send via email to the Administrators for review.\"\"\" def",
"\"Updating {}\".format(graph_name)) graph = getattr(self, graph_name) graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest() if graph_sha1 == self.graph_hashes[graph_name]:",
"graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest() if graph_sha1 == self.graph_hashes[graph_name]: return git_graph = getattr(self, \"{}_git\".format(graph_name)) if",
"getattr(self, graph_name) message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch:",
"has been submitted for review\", \"status\": True, \"iri\": work_iri} def add_profile(**kwargs): \"\"\"Adds a",
"self.recipients]) message[\"Subject\"] = subject email_server = smtplib.SMTP( self.email.get(\"host\"), self.email.get(\"port\")) email_server.ehlo() if self.email.get(\"tls\"): email_server.starttls()",
"div.append(col_2) col_3 = soup.new_tag(\"div\", **{\"class\": \"col-4\"}) iri = citation.get(\"iri\") if iri: edit_click =",
"RDF for current academic year # and CC people now = datetime.datetime.utcnow() if",
"to CC People\".format(action, person_label)) self.__save_graph__( git_repo=self.tiger_repo, file_path=self.current_year_path, graph_name=\"current_year\", message=\"{} person to Department for",
"not None: self.graph.add( (work, SCHEMA.editionStatement, rdflib.Literal(work_form.editionStatement.data))) if work_form.editor.data is not None: self.graph.add((work, SCHEMA.editor,",
"'master']) click.echo(result.returncode, result.stdout) config_mgr.conns.datastore.mgr.reset() class ProfileUpdateThread(threading.Thread): def __init__(self, **kwargs): threading.Thread.__init__(self) config = kwargs.get(\"config\")",
"self.graph.add((work_iri, SCHEMA.pageStart, rdflib.Literal(work_form.page_start.data))) if work_form.page_end.data !=None: self.graph.add((work_iri, SCHEMA.pageEnd, rdflib.Literal(work_form.page_end.data))) journal = rdflib.BNode() self.graph.add((journal,",
"= 'Edited Creative Work {}'.format(citation.iri) __email_work__(graph=temp_work, config=config, carbon_copy=[current_user_email,], subject=email_subject, text=\"Edited {} revised by",
"rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\") CITE = rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\") PROV = rdflib.Namespace(\"http://www.w3.org/ns/prov#\") SCHEMA = rdflib.Namespace(\"http://schema.org/\") class GitProfile(object): def",
"citation.populate() citation.populate_book() citation.add_book() if revised_by: add_qualified_revision(temp_work, rdflib.URIRef(citation.iri), revised_by) email_subject = 'Edited Creative Work",
"email_server = smtplib.SMTP( self.email.get(\"host\"), self.email.get(\"port\")) email_server.ehlo() if self.email.get(\"tls\"): email_server.starttls() body = MIMEText(body, _charset=\"UTF-8\")",
"None: volume = rdflib.BNode() self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume)) self.graph.add((volume, SCHEMA.volumeNumber, rdflib.Literal(work_form.volume_number.data))) self.graph.add((volume, SCHEMA.partOf, journal))",
"config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user_email = kwargs.get(\"current_user_email\") config_manager = kwargs.get('config_manager') connection",
"except GithubException: repo = getattr(self, repo_name) blob = repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) return",
"getattr(self, repo_name) blob = repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) return raw_turtle def __save_graph__(self, **kwargs):",
"volume)) else: self.graph.add((issue, SCHEMA.partOf, journal)) self.graph.add((work_iri, SCHEMA.partOf, issue)) elif volume is not None:",
"None: statement_iri = rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research",
"add_qualified_generation( profile.graph, statement_iri, generated_by) citations = form.getlist(\"citations\") for uri in citations: profile.graph.add( (rdflib.URIRef(uri),",
"len(statement) > 0: profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) form_subjects = form.getlist(\"subjects\") new_subjects =",
"None: self.graph.add((work, SCHEMA.isbn, rdflib.Literal(work_form.isbn.data))) if work_form.editionStatement.data is not None: self.graph.add( (work, SCHEMA.editionStatement, rdflib.Literal(work_form.editionStatement.data)))",
"field.data) self.__send_email__(\"Added New Work\", email_body) return work_iri def new(self, message): \"\"\"Adds a new",
"= hashlib.sha1( self.creative_works.serialize(format='n3')).hexdigest() self.graph_hashes[\"research_statements\"] = hashlib.sha1( self.research_statements.serialize(format='n3')).hexdigest() self.graph_hashes[\"fast_subjects\"] = hashlib.sha1( self.fast_subjects.serialize(format='n3')).hexdigest() def __get_content__(self,",
"entity, new_work) entity = new_work if volume is not None: vol_num = work_graph.value(subject=volume,",
"text = kwargs.get('text') carbon_copy = kwargs.get(\"carbon_copy\", []) message = MIMEMultipart() message[\"From\"] = sender",
"**{\"class\": \"col-4\"}) iri = citation.get(\"iri\") if iri: edit_click = \"editCitation('{}');\".format(iri) delete_click = \"deleteCitation('{}');\".format(iri)",
"rdflib.RDFS.label, subject_label)) def __update_research_statements__(self): existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) current_description = self.research_statements.value( subject=existing_stmt,",
"work_form.page_start.data !=None: self.graph.add((work_iri, SCHEMA.pageStart, rdflib.Literal(work_form.page_start.data))) if work_form.page_end.data !=None: self.graph.add((work_iri, SCHEMA.pageEnd, rdflib.Literal(work_form.page_end.data))) journal =",
"getattr(work_form, row) email_body += \"\\n{}:\\t{}\".format(row, field.data) self.__send_email__(\"Added New Work\", email_body) return work_iri def",
"format='turtle') self.graph_hashes[\"creative_works\"] = hashlib.sha1( self.creative_works.serialize(format='n3')).hexdigest() self.graph_hashes[\"research_statements\"] = hashlib.sha1( self.research_statements.serialize(format='n3')).hexdigest() self.graph_hashes[\"fast_subjects\"] = hashlib.sha1( self.fast_subjects.serialize(format='n3')).hexdigest()",
"volume = rdflib.BNode() self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume)) self.graph.add((volume, SCHEMA.volumeNumber, rdflib.Literal(work_form.volume_number.data))) self.graph.add((volume, SCHEMA.partOf, journal)) if",
"self.graph.add((work_iri, SCHEMA.author, self.person_iri)) elif generated_by: self.person_iri = generated_by self.graph.add((work_iri, SCHEMA.author, generated_by)) if \"url\"",
"under_review = soup.new_tag(\"em\") under_review.string = \"In Review\" col_1.append(under_review) div.append(col_1) col_2 = soup.new_tag(\"div\", **{\"class\":",
"= kwargs.get('config_manager') author = kwargs.get(\"author\") connection = config_manager.conns iri = kwargs.get(\"iri\") __email_work__( config=config,",
"not None: self.fast_subjects.add((subject, rdflib.RDFS.label, subject_label)) def __update_research_statements__(self): existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri) current_description",
"git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: git_repo.update_file(file_path,",
"= kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') connection = config_manager.conns generated_by = kwargs.get(\"generated_by\") work_form =",
"= self.__populate_work__(work_form, generated_by) email_body = \"Properties and Values for Creative Work {}\".format(work_iri) for",
"object=self.person_iri) for row in self.research_statements.objects( subject=existing_stmt, predicate=SCHEMA.about): existing_subjects.add(row) for fast_heading in self.profile.graph.objects( subject=existing_stmt,",
"a work graph and configuration and emails the graph in turtle format to",
"(iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) message = \"New {} as {} to Colorado College's",
"for review\", \"status\": True, \"iri\": work_iri} def add_profile(**kwargs): \"\"\"Adds a profile stub to",
"None, None volume_or_issue = work_graph.value(predicate=SCHEMA.partOf, object=entity) schema_class = work_graph.value(subject=volume_or_issue, predicate=rdflib.RDF.type) if schema_class is",
"graph with new work Args: form(Flask.request.form): Dict of form values \"\"\" if len(work_form.iri.data)",
"(rdflib.URIRef(uri), SCHEMA.author.rdflib, person_iri)) statement = form.get(\"research_stmt\") if len(statement) > 0: profile.graph.add( (statement_iri, SCHEMA.description.rdflib,",
"= \"{} made the following changes to {}'s academic profile:\\n\".format( generated_by, form['label']) statement_iri_results",
"review before adding to production. Keyword args: work_graph(rdflib.Graph): RDF Graph of Citation config:",
"= citation.get(\"iri\") if iri: edit_click = \"editCitation('{}');\".format(iri) delete_click = \"deleteCitation('{}');\".format(iri) edit_a = soup.new_tag(\"a\",",
"{}, see attached RDF turtle file\".format( citation.citation_type, revised_by, datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Changes",
"citation.add_book() if revised_by: add_qualified_revision(temp_work, rdflib.URIRef(citation.iri), revised_by) email_subject = 'Edited Creative Work {}'.format(citation.iri) __email_work__(graph=temp_work,",
"message.attach(attachment) email_server.login( self.email.get(\"user\"), self.email.get(\"password\")) recipients = list(set(self.recipients)) # Quick dedup email_server.sendmail(self.email.get(\"user\"), recipients, message.as_string())",
"{{ ?volume schema:partOf ?work ; schema:volumeNumber ?volumeNumber . BIND(<{0}> as ?work) BIND(\"{1}\" as",
"= now.year end_year = now.year + 1 self.current_year_path = \"/KnowledgeGraph/cc-{0}-{1}.ttl\".format( start_year, end_year) self.current_year",
"= rdflib.Graph() self.profile = kwargs.get(\"profile\") self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): try:",
"the latest changes in each repository for directory in data_upload: os.chdir(directory) result =",
"\"type=\": \"input\"}) delete_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-trash-alt\"})) col_3.append(delete_a) div.append(col_3) return div.prettify() def __reconcile_article__(work_graph, connection):",
"data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') def __save_graph__(self, **kwargs):",
"temp_work.namespace_manager.bind(\"cite\", rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")) for prefix, namespace in git_profile.cc_people.namespaces(): temp_work.namespace_manager.bind(prefix, namespace) if work_type.startswith(\"article\"): citation =",
"server.sendmail(sender, recipients, message.as_string()) server.close() def generate_citation_html(citation): soup = BeautifulSoup(\"\", 'lxml') div = soup.new_tag(\"div\",",
"kwargs.get(\"current_user\") output = '' person_iri = rdflib.URIRef(form.get(\"iri\")) profile = EmailProfile(config_manager, person_iri) msg =",
"= EMAIL_LOOKUP.format( current_user_email.lower()) email_results = connection.datastore.query(sparql) if len(email_results) > 0: revised_by = rdflib.URIRef(",
"div.append(col_1) col_2 = soup.new_tag(\"div\", **{\"class\": \"col-7\"}) if \"article_title\" in citation: name = citation.get(\"article_title\")",
"work_form, generated_by=None): work_iri = self.__populate_work__(work_form, generated_by) email_body = \"Properties and Values for Creative",
"if \"article_title\" in citation: name = citation.get(\"article_title\") elif \"title\" in citation: name =",
"in work_form and len(work_form.doi.data) > 0: work_iri = rdflib.URIRef(work_form.doi.data) else: work_iri = rdflib.URIRef(",
"{}\".format(page_start) col_2.append(span) page_end = citation.get(\"page_end\") if page_end and len(page_end) > 0: span =",
"hashlib.sha1( self.cc_people.serialize(format='n3')).hexdigest() self.graph_hashes[\"current_year\"] = hashlib.sha1( self.current_year.serialize(format='n3')).hexdigest() # Start retrieving and parsing latest RDF",
"graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def update_all(self, person_label, action=\"Add\", connection=None):",
"SCHEMA.givenName.rdflib, rdflib.Literal(given_name, lang=\"en\"))) family_name = form.get(\"family_name\") if family_name is not None: profile.graph.add((person_iri, SCHEMA.familyName.rdflib,",
"rdflib.Literal(work_form.abstract.data))) if citation_type.startswith(\"article\"): self.__add_article__(work_iri, work_form) elif citation_type.startswith(\"book chapter\"): self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter)) book_bnode =",
"config = kwargs.get(\"config\") sender = config.get('EMAIL')['user'] recipients = config.get(\"ADMINS\") subject = kwargs.get('subject') text",
"rdflib.Literal(statement, lang=\"en\"))) form_subjects = form.getlist(\"subjects\") new_subjects = {} for row in form_subjects: fast_id,",
"person_iri profile.graph.add( (person_iri, rdflib.RDF.type, BF.Person.rdflib)) given_name = form.get(\"given_name\") if given_name is not None:",
"(statement_iri, SCHEMA.about.rdflib, iri_subject)) profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) profile.update(msg)",
"of form values \"\"\" self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.ScholarlyArticle)) self.graph.add((work_iri, SCHEMA.name, rdflib.Literal(work_form.article_title.data))) if work_form.page_start.data !=None:",
"periodical = result[0].get(\"periodical\").get(\"value\") if periodical != str(entity): new_work = rdflib.URIRef(periodical) bibcat.replace_iri(work_graph, entity, new_work)",
"for row in work_form._fields: if row.startswith(\"csrf_token\"): continue field = getattr(work_form, row) email_body +=",
"profile\"\"\" self.__send_email__(\"Add new profile\", message) def update(self, message): \"\"\"Edits existing profile\"\"\" global BACKGROUND_THREAD",
"year # and CC people now = datetime.datetime.utcnow() if now.month < 7: start_year",
"BIND(<{0}> as ?work) BIND(\"{1}\" as ?volumeNumber) }}\"\"\".format(entity, vol_num)) if result and len(result) >",
"kwargs.get(\"year-iri\") if dept_year is not None: dept_year_iri = rdflib.URIRef(dept_year_iri) title = kwargs.get(\"title-iri\") profile.graph.add(",
"file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject added\") class EmailProfile(object): \"\"\"Simple Email Profile class that",
"EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = person_iri",
"now.year + 1 self.current_year_path = \"/KnowledgeGraph/cc-{0}-{1}.ttl\".format( start_year, end_year) self.current_year = rdflib.Graph() self.cc_people =",
"work_form.notes.data is not None: self.graph.add( (work, SCHEMA.description, rdflib.Literal(work_form.notes.data))) def __populate_work__(self, work_form, generated_by=None): \"\"\"Populates",
"def __add_article__(self, work_iri, work_form): \"\"\"Article specific data added to creative work Args: work_iri(rdflib.URIRef):",
"message=\"Fast subject added\") self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/creative-works.ttl\", graph_name=\"creative_works\", message=\"Creative Works added\") if connection:",
"citation.populate_article() citation.add_article() elif work_type.startswith(\"book\"): citation = utilities.Book_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_book() citation.add_book()",
"repo = getattr(self, repo_name) blob = repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) return raw_turtle def",
"= hashlib.sha1( self.cc_people.serialize(format='n3')).hexdigest() self.graph_hashes[\"current_year\"] = hashlib.sha1( self.current_year.serialize(format='n3')).hexdigest() # Start retrieving and parsing latest",
"= getattr(self, \"{}_git\".format(graph_name)) if branch: self.scholarship_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: self.scholarship_repo.update_file(file_path, message,",
"kwargs.get(\"title-iri\") profile.graph.add( (dept_year_iri, rdflib.URIRef(title), person_iri)) statement = kwargs.get(\"statement\", form.get(\"research_stmt\")) if statement is not",
"work_iri = rdflib.URIRef(profile.add(work_form, generated_by)) #profile.update(\"Added or Updated Creative Work\") return {\"message\": \"New work",
"Work {}'.format(citation.iri) __email_work__(graph=temp_work, config=config, carbon_copy=[current_user_email,], subject=email_subject, text=\"Edited {} revised by {} on {},",
"self.profile.graph.value(subject=subject, predicate=rdflib.RDFS.label) if subject_label is not None: self.fast_subjects.add((subject, rdflib.RDFS.label, subject_label)) def __update_research_statements__(self): existing_stmt",
"= citation.get(\"issue_number\") if issue_number and len(issue_number ) > 0: span = soup.new_tag(\"span\") span.string",
"?issue schema:partOf ?volume . }} OPTIONAL {{ ?issue schema:partOf ?periodical . }} BIND(<{0}>",
"SCHEMA.description, current_description)) self.research_statements.replace('\"','\\\"') self.research_statements.add((existing_stmt, SCHEMA.description, new_description)) def run(self): # Function iterates and commits",
"page_string = \"{}.\" span.string = page_string.format(page_end) col_2.append(span) div.append(col_2) col_3 = soup.new_tag(\"div\", **{\"class\": \"col-4\"})",
"= config_manager.conns revised_by = kwargs.get(\"revised_by\") raw_citation = kwargs.get(\"citation\") work_type = kwargs.get(\"work_type\", \"article\") if",
"rdflib.Literal(work_form.abstract.data))) return work_iri def add(self, work_form, generated_by=None): work_iri = self.__populate_work__(work_form, generated_by) email_body =",
"= soup.new_tag(\"div\", **{\"class\": \"col-7\"}) if \"article_title\" in citation: name = citation.get(\"article_title\") elif \"title\"",
"to Department for school year\".format(action)) self.__save_graph__( git_repo=self.scholarship_repo, file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=\"{} Research Statement for",
"is not None: self.fast_subjects.add((subject, rdflib.RDFS.label, subject_label)) def __update_research_statements__(self): existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri)",
"elif \"title\" in citation: name = citation.get(\"title\") if \"url\" in citation: work_link =",
"citation_type.startswith(\"article\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-file-alt\"})) elif citation_type.endswith(\"book\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-book\"})) under_review =",
"class EmailProfile(object): \"\"\"Simple Email Profile class that creates a local RDF graph for",
"= getattr(self, \"{}_git\".format(graph_name)) if branch: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha, branch=branch) else: git_repo.update_file(file_path, message,",
"= connection.datastore.query(sparql) if len(email_results) > 0: generated_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) work_iri = rdflib.URIRef(profile.add(work_form,",
"if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) if citation_type.startswith(\"article\"): self.__add_article__(work_iri, work_form) elif citation_type.startswith(\"book",
"'lxml') div = soup.new_tag(\"div\", **{\"class\": \"row\"}) col_1 = soup.new_tag(\"div\", **{\"class\": \"col-1\"}) citation_type =",
"row) email_body += \"\\n{}:\\t{}\".format(row, field.data) self.__send_email__(\"Added New Work\", email_body) return work_iri def new(self,",
"SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) add_qualified_generation(profile.graph, statement_iri, generated_by) form_subjects = form.getlist(\"subjects\") new_subjects = {} for",
"to datastore\"\"\" config = kwargs.get(\"config\") profile = EmailProfile(config) current_user = kwargs.get(\"current_user\") config_manager =",
"current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') author = kwargs.get(\"author\") connection = config_manager.conns iri",
"\"\"\"Simple Email Profile class that creates a local RDF graph for new profile",
"and len(result) > 0: new_issue = rdflib.URIRef(result[0].get(\"issue\").get(\"value\")) bibcat.replace_iri(work_graph, issue, new_issue) def add_creative_work(**kwargs): \"\"\"Calls",
"= utilities.Book_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_book() citation.add_book() if revised_by: add_qualified_revision(temp_work, rdflib.URIRef(citation.iri), revised_by)",
"file_path = kwargs.get(\"file_path\") branch = kwargs.get(\"branch\") graph_name = kwargs.get(\"graph_name\") graph = getattr(self, graph_name)",
"for content in self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"): raw_turtle = self.__get_content__(\"tiger_repo\", content) if content.name.startswith(self.current_year_path.split(\"/\")[-1]): self.current_year_git = content",
"work_graph.value(subject=volume_or_issue, predicate=rdflib.RDF.type) if schema_class is SCHEMA.volumeNumber: volume = volume_or_issue issue = work_graph.value(predicate=SCHEMA.partOf, object=volume)",
"is not None: dept_year_iri = rdflib.URIRef(dept_year_iri) title = kwargs.get(\"title-iri\") profile.graph.add( (dept_year_iri, rdflib.URIRef(title), person_iri))",
"profile = EmailProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') connection = config_manager.conns generated_by",
"delete_click = \"deleteCitation('{}');\".format(iri) edit_a = soup.new_tag(\"a\", **{\"class\": \"btn btn-warning disabled\", \"onclick\": edit_click, \"type=\":",
"**{\"class\": \"fas fa-book\"})) under_review = soup.new_tag(\"em\") under_review.string = \"In Review\" col_1.append(under_review) div.append(col_1) col_2",
"generated_by)) #profile.update(\"Added or Updated Creative Work\") return {\"message\": \"New work has been submitted",
"dept_year = kwargs.get(\"year-iri\") if dept_year is not None: dept_year_iri = rdflib.URIRef(dept_year_iri) title =",
"new_subjects.items(): iri_subject = rdflib.URIRef(fast_subject) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) existing_label = profile.fast_subjects.value( subject=iri_subject, predicate=rdflib.RDFS.label)",
"{{ ?issue rdf:type schema:issueNumber ; schema:issueNumber ?issue_number . OPTIONAL {{ ?issue schema:partOf ?volume",
"and parsing latest RDF for creative works, # research statements, and FAST subjects",
"self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/creative-works.ttl\", graph_name=\"creative_works\", message=\"Creative Works added\") if connection: self.__reload_triplestore__(connection) def __reload_triplestore__(self,",
"on form values\"\"\" config_manager = kwargs.get('config_manager') connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA",
"new_description is not None \\ and str(current_description) != str(new_description): self.research_statements.remove((existing_stmt, SCHEMA.description, current_description)) self.research_statements.replace('\"','\\\"')",
"row in work_graph.query( \"\"\"SELECT ?entity ?label WHERE { ?entity rdf:type schema:Periodical ; schema:name",
"rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {}\".format(label), lang=\"en\"))) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, SCHEMA.description.rdflib,",
"iri, author, current_user.data.get('mail'), datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Deletion of {} for {} under",
"= config_manager.nsm.bf SCHEMA = config_manager.nsm.schema form = kwargs.get('form') current_user = kwargs.get(\"current_user\") output =",
"config_manager.nsm.schema form = kwargs.get('form') current_user = kwargs.get(\"current_user\") output = '' person_iri = rdflib.URIRef(form.get(\"iri\"))",
"= person_iri def __send_email__(self, subject, body): \"\"\"Sends email to administrators with attached profile",
"self.graph.add((work_iri, CITE.authorString, rdflib.Literal(work_form.author_string.data))) if generated_by: add_qualified_generation(self.graph, work_iri, generated_by) citation_type = work_form.citation_type.data self.graph.add((work_iri, CITE.citationType,",
"span = soup.new_tag(\"span\") span.string = \"({0})\".format(citation.get(\"year\")) col_2.append(span) vol_number = citation.get(\"volume_number\") if vol_number and",
"rdflib.URIRef(result[0].get(\"issue\").get(\"value\")) bibcat.replace_iri(work_graph, issue, new_issue) def add_creative_work(**kwargs): \"\"\"Calls utilities to populate and save to",
"graph_name=\"fast_subjects\", message=\"Fast subject added\") self.__save_graph__( git_repo=self.scholarship_repo, file_path =\"/data/creative-works.ttl\", graph_name=\"creative_works\", message=\"Creative Works added\") if",
"self.__send_email__(\"Added New Work\", email_body) return work_iri def new(self, message): \"\"\"Adds a new profile\"\"\"",
"= work_graph.value(subject=volume, predicate=SCHEMA.volumeNumber) result = connection.datastore.query(\"\"\"SELECT ?volume WHERE {{ ?volume schema:partOf ?work ;",
"# and CC people now = datetime.datetime.utcnow() if now.month < 7: start_year =",
"self.config = config self.triplestore_url = self.config.get(\"TRIPLESTORE_URL\") self.graph = rdflib.Graph() self.graph.namespace_manager.bind(\"bf\", BF) self.graph.namespace_manager.bind(\"cite\", CITE)",
"\"\"\" if len(work_form.iri.data) > 0: work_iri = rdflib.URIRef(work_form.iri.data) else: # Mint IRI for",
"SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {}\".format(label), lang=\"en\"))) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri))",
"import BeautifulSoup from flask import current_app from github import Github, GithubException import utilities",
"{} cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.triplestore_url = config.get(\"TRIPLESTORE_URL\") self.tutt_github = cc_github.get_organization(\"Tutt-Library\") # Start",
"fast_heading in self.profile.graph.objects( subject=existing_stmt, predicate=SCHEMA.about): new_subjects.add(fast_heading) for subject in list(existing_subjects.difference(new_subjects)): self.research_statements.remove((existing_stmt, SCHEMA.about, subject))",
"form['label']) statement_iri_results = connection.datastore.query( RESEARCH_STMT_IRI.format( person_iri)) if len(statement_iri_results) > 0: statement_iri = rdflib.URIRef(",
"lang=\"en\"))) family_name = form.get(\"family_name\") if family_name is not None: profile.graph.add((person_iri, SCHEMA.familyName.rdflib, rdflib.Literal(family_name, lang=\"en\")))",
"as ?volume) BIND(<{1}> as ?periodical) BIND(\"{2}\" as ?issue_number) }}\"\"\".format(volume, periodical, issue_number) ) if",
"\"\"\"Populates graph with new work Args: form(Flask.request.form): Dict of form values \"\"\" if",
"= connection.datastore.query(\"\"\"SELECT ?volume WHERE {{ ?volume schema:partOf ?work ; schema:volumeNumber ?volumeNumber . BIND(<{0}>",
"to scholarship graph\"\"\" config = kwargs.get(\"config\") current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') profile",
"try: raw_turtle = content.decoded_content except GithubException: blob = self.scholarship_repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) if",
"raw_turtle def __save_graph__(self, **kwargs): git_repo = kwargs.get(\"git_repo\") file_path = kwargs.get(\"file_path\") graph_name = kwargs.get(\"graph_name\")",
"self.graph.add((work_iri, SCHEMA.author, generated_by)) if \"url\" in work_form and len(work_form.url.data) > 0: self.graph.add((work_iri, SCHEMA.url,",
"rdflib.URIRef(fast_subject) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) existing_label = profile.fast_subjects.value( subject=iri_subject, predicate=rdflib.RDFS.label) if existing_label is",
"rdflib.Literal(citation_type))) if \"author\" in work_form and len(work_form.author.data) > 0: self.person_iri = rdflib.URIRef(work_form.author.data) self.graph.add((work_iri,",
"self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.profile = kwargs.get(\"profile\") self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for",
"click import rdflib import requests from bs4 import BeautifulSoup from flask import current_app",
"= \"Properties and Values for Creative Work {}\".format(work_iri) for row in work_form._fields: if",
"}}\"\"\".format(entity, vol_num)) if result and len(result) > 0: new_volume = rdflib.URIRef(result[0].get(\"volume\").get(\"value\")) bibcat.replace_iri(work_graph, volume,",
"in git_profile.cc_people.namespaces(): temp_work.namespace_manager.bind(prefix, namespace) if work_type.startswith(\"article\"): citation = utilities.Article_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate()",
"edit_click, \"type=\": \"input\"}) edit_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-edit\"})) col_3.append(edit_a) delete_a = soup.new_tag(\"a\", **{\"class\": \"btn",
"{}\".format(issue_number) col_2.append(span) page_start = citation.get(\"page_start\") if page_start and len(page_start) > 0: span =",
"**kwargs): file_path = kwargs.get(\"file_path\") branch = kwargs.get(\"branch\") graph_name = kwargs.get(\"graph_name\") graph = getattr(self,",
"{}\".format( iri, author, current_user.data.get('mail'), datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Deletion of {} for {}",
"span.string = page_string.format(page_end) col_2.append(span) div.append(col_2) col_3 = soup.new_tag(\"div\", **{\"class\": \"col-4\"}) iri = citation.get(\"iri\")",
"(work, SCHEMA.editionStatement, rdflib.Literal(work_form.editionStatement.data))) if work_form.editor.data is not None: self.graph.add((work, SCHEMA.editor, rdflib.Literal(work_form.editor.data))) if work_form.provisionActivityStatement.data",
"self.graph.add((issue, SCHEMA.partOf, journal)) self.graph.add((work_iri, SCHEMA.partOf, issue)) elif volume is not None: self.graph.add((work_iri, SCHEMA.partOf,",
"div = soup.new_tag(\"div\", **{\"class\": \"row\"}) col_1 = soup.new_tag(\"div\", **{\"class\": \"col-1\"}) citation_type = citation.get(\"ENTRYTYPE\")",
"= repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) return raw_turtle def __save_graph__(self, **kwargs): git_repo = kwargs.get(\"git_repo\")",
"= config_manager.nsm.bf SCHEMA = config_manager.nsm.schema sparql = EMAIL_LOOKUP.format( current_user.data.get('mail').lower()) email_results = connection.datastore.query(sparql) if",
"= MIMEText(body, _charset=\"UTF-8\") message.attach(body) graph_turtle = io.StringIO( self.graph.serialize(format='turtle').decode()) attachment = MIMEText(graph_turtle.read()) attachment.add_header('Content-Disposition', 'attachment',",
"work_iri, generated_by) citation_type = work_form.citation_type.data self.graph.add((work_iri, CITE.citationType, rdflib.Literal(citation_type))) if \"author\" in work_form and",
"message=\"{} person to Department for school year\".format(action)) self.__save_graph__( git_repo=self.scholarship_repo, file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=\"{} Research",
"None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) return work_iri def add(self, work_form, generated_by=None): work_iri = self.__populate_work__(work_form,",
"= kwargs.get(\"config\") sender = config.get('EMAIL')['user'] recipients = config.get(\"ADMINS\") subject = kwargs.get('subject') text =",
"uri in citations: profile.graph.add( (rdflib.URIRef(uri), SCHEMA.author.rdflib, person_iri)) statement = form.get(\"research_stmt\") if len(statement) >",
"result = connection.datastore.query(\"\"\"SELECT ?volume WHERE {{ ?volume schema:partOf ?work ; schema:volumeNumber ?volumeNumber .",
". }} BIND(<{0}> as ?volume) BIND(<{1}> as ?periodical) BIND(\"{2}\" as ?issue_number) }}\"\"\".format(volume, periodical,",
"statement = kwargs.get(\"statement\", form.get(\"research_stmt\")) if statement is not None: statement_iri = rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1()))",
"PROV = rdflib.Namespace(\"http://www.w3.org/ns/prov#\") SCHEMA = rdflib.Namespace(\"http://schema.org/\") class GitProfile(object): def __init__(self, config): self.graph_hashes =",
"kwargs.get(\"carbon_copy\", []) message = MIMEMultipart() message[\"From\"] = sender message[\"Subject\"] = subject message[\"To\"] =",
"add_qualified_generation(self.graph, work_iri, generated_by) citation_type = work_form.citation_type.data self.graph.add((work_iri, CITE.citationType, rdflib.Literal(citation_type))) if \"author\" in work_form",
"> 0: statement_iri = rdflib.URIRef( statement_iri_results[0].get(\"iri\").get(\"value\")) add_qualified_revision(profile.graph, statement_iri, generated_by) else: statement_iri = rdflib.URIRef(",
"= rdflib.Namespace(\"http://schema.org/\") for row in work_graph.query( \"\"\"SELECT ?entity ?label WHERE { ?entity rdf:type",
"__email_work__(graph=temp_work, config=config, carbon_copy=[current_user_email,], subject=email_subject, text=\"Edited {} revised by {} on {}, see attached",
"self.research_statements.remove((existing_stmt, SCHEMA.description, current_description)) self.research_statements.replace('\"','\\\"') self.research_statements.add((existing_stmt, SCHEMA.description, new_description)) def run(self): # Function iterates and",
"format to the administrators for review before adding to production. Keyword args: work_graph(rdflib.Graph):",
"if family_name is not None: profile.graph.add((person_iri, SCHEMA.familyName.rdflib, rdflib.Literal(family_name, lang=\"en\"))) label = \"{} {}\".format(given_name,",
"message): \"\"\"Edits existing profile\"\"\" global BACKGROUND_THREAD BACKGROUND_THREAD = ProfileUpdateThread( config=self.config, msg=message, person=self.person_iri, profile=self)",
"= cc_github.get_organization(\"Tutt-Library\") # Start retrieving and parsing latest RDF for current academic year",
"Dict of form values \"\"\" self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.ScholarlyArticle)) self.graph.add((work_iri, SCHEMA.name, rdflib.Literal(work_form.article_title.data))) if work_form.page_start.data",
"citation_type.startswith(\"book\"): self.__add_book__(work_iri, work_form) else: abort(500) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) return",
"journal)) if work_form.month.data != None: self.graph.add((work_iri, CITE.month, rdflib.Literal(work_form.month.data))) def __add_book__(self, work, work_form): self.graph.add((work,",
"return {\"message\": \"Changes to work has been submitted for review\", \"status\": True} def",
"person_iri)) if len(statement_iri_results) > 0: statement_iri = rdflib.URIRef( statement_iri_results[0].get(\"iri\").get(\"value\")) add_qualified_revision(profile.graph, statement_iri, generated_by) else:",
"= row.split(\"==\") if fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) new_subjects[fast_uri] =",
"= config_manager.nsm.bf SCHEMA = config_manager.nsm.schema results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) >",
"self.graph.add((journal, SCHEMA.name, rdflib.Literal(work_form.journal_title.data))) issue, volume = None, None if work_form.volume_number.data != None: volume",
"self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): try: raw_turtle = content.decoded_content except GithubException: blob =",
"kwargs.get(\"work_form\") BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema sparql = EMAIL_LOOKUP.format( current_user.data.get('mail').lower()) email_results =",
"no. {}\".format(issue_number) col_2.append(span) page_start = citation.get(\"page_start\") if page_start and len(page_start) > 0: span",
"not None: profile.graph.add((person_iri, SCHEMA.familyName.rdflib, rdflib.Literal(family_name, lang=\"en\"))) label = \"{} {}\".format(given_name, family_name) profile.graph.add((person_iri, rdflib.RDFS.label,",
"with attached profile graph\"\"\" message = MIMEMultipart() message[\"From\"] = self.email.get(\"user\") message[\"To\"] = \",\".join([\"<{0}>\".format(r)",
"= io.StringIO( work_graph.serialize(format='turtle').decode()) attachment = MIMEText(work_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='work.ttl') message.attach(attachment) #try: server =",
"volume_or_issue issue = work_graph.value(predicate=SCHEMA.partOf, object=volume) elif schema_class is SCHEMA.issueNumber: issue = volume_or_issue result",
"if issue is not None: issue_number = work_graph.value(subject=issue, predicate=SCHEMA.issueNumber) result = connection.datastore.query(\"\"\"SELECT ?issue",
"current_user_email.lower()) email_results = connection.datastore.query(sparql) if len(email_results) > 0: revised_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) temp_work",
"citation_type = citation.get(\"ENTRYTYPE\") if citation_type.startswith(\"article\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-file-alt\"})) elif citation_type.endswith(\"book\"): col_1.append(soup.new_tag(\"i\", **{\"class\":",
"?periodical . }} BIND(<{0}> as ?volume) BIND(<{1}> as ?periodical) BIND(\"{2}\" as ?issue_number) }}\"\"\".format(volume,",
"kwargs.get(\"graph_name\") branch = kwargs.get(\"branch\") message = kwargs.get(\"message\", \"Updating {}\".format(graph_name)) graph = getattr(self, graph_name)",
"self.research_statements.value( subject=existing_stmt, predicate=SCHEMA.description) new_description = self.profile.graph.value( subject=existing_stmt, predicate=SCHEMA.description) if new_description is not None",
"import bibcat import datetime import hashlib import io import os import pprint import",
"kwargs.get(\"profile\") self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): try: raw_turtle = content.decoded_content except",
"if \"url\" in citation: work_link = soup.new_tag(\"a\", href=citation.get(\"url\")) work_link.string = name col_2.append(work_link) else:",
"def __send_email__(self, subject, body): \"\"\"Sends email to administrators with attached profile graph\"\"\" message",
"','.join(carbon_copy) recipients.extend(carbon_copy) body = MIMEText(text, _charset=\"UTF-8\") message.attach(body) if work_graph: work_turtle = io.StringIO( work_graph.serialize(format='turtle').decode())",
"WHERE { ?entity rdf:type schema:Periodical ; schema:name ?label . } \"\"\"): entity, label",
"= config_manager.nsm.schema results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0: generated_by =",
"profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) form_subjects = form.getlist(\"subjects\") new_subjects = {} for row",
"lang=\"en\"))) label = \"{} {}\".format(given_name, family_name) profile.graph.add((person_iri, rdflib.RDFS.label, rdflib.Literal(label, lang=\"en\"))) email = form.get(\"email\")",
"?volumeNumber) }}\"\"\".format(entity, vol_num)) if result and len(result) > 0: new_volume = rdflib.URIRef(result[0].get(\"volume\").get(\"value\")) bibcat.replace_iri(work_graph,",
"result = connection.datastore.query(\"\"\"SELECT ?periodical WHERE {{ ?periodical schema:name ?name . FILTER(CONTAINS(?name, \"{0}\")) }}\"\"\".format(label))",
"body = MIMEText(text, _charset=\"UTF-8\") message.attach(body) if work_graph: work_turtle = io.StringIO( work_graph.serialize(format='turtle').decode()) attachment =",
"work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) if citation_type.startswith(\"article\"): self.__add_article__(work_iri, work_form) elif citation_type.startswith(\"book chapter\"):",
"current_user.data.get('mail').lower())) if len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = None form",
"message.attach(body) if work_graph: work_turtle = io.StringIO( work_graph.serialize(format='turtle').decode()) attachment = MIMEText(work_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='work.ttl')",
"form.get(\"family_name\") if family_name is not None: profile.graph.add((person_iri, SCHEMA.familyName.rdflib, rdflib.Literal(family_name, lang=\"en\"))) label = \"{}",
"add_creative_work(**kwargs): \"\"\"Calls utilities to populate and save to datastore\"\"\" config = kwargs.get(\"config\") profile",
"self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.ScholarlyArticle)) self.graph.add((work_iri, SCHEMA.name, rdflib.Literal(work_form.article_title.data))) if work_form.page_start.data !=None: self.graph.add((work_iri, SCHEMA.pageStart, rdflib.Literal(work_form.page_start.data))) if",
"SCHEMA.about.rdflib, iri_subject)) profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label, lang=\"en\"))) profile.update(msg) return",
"_charset=\"UTF-8\") message.attach(body) graph_turtle = io.StringIO( self.graph.serialize(format='turtle').decode()) attachment = MIMEText(graph_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='profile.ttl') message.attach(attachment)",
"email_server.sendmail(self.email.get(\"user\"), recipients, message.as_string()) email_server.close() def __add_article__(self, work_iri, work_form): \"\"\"Article specific data added to",
"}}\"\"\".format(volume, periodical, issue_number) ) if result and len(result) > 0: new_issue = rdflib.URIRef(result[0].get(\"issue\").get(\"value\"))",
"if work_form.issue_number.data != None: issue = rdflib.BNode() self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue)) self.graph.add((issue, SCHEMA.issueNumber, rdflib.Literal(work_form.issue_number.data)))",
"citation.add_article() elif work_type.startswith(\"book\"): citation = utilities.Book_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_book() citation.add_book() if",
"person_iri)) statement = kwargs.get(\"statement\", form.get(\"research_stmt\")) if statement is not None: statement_iri = rdflib.URIRef(\"http://catalog.coloradocollege.edu/{}\".format(",
"file\".format( citation.citation_type, revised_by, datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Changes to work has been submitted",
"work_form.month.data != None: self.graph.add((work_iri, CITE.month, rdflib.Literal(work_form.month.data))) def __add_book__(self, work, work_form): self.graph.add((work, rdflib.RDF.type, SCHEMA.Book))",
"if len(email_results) > 0: revised_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) temp_work = rdflib.Graph() temp_work.namespace_manager.bind(\"cite\", rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\"))",
"as ?issue_number) }}\"\"\".format(volume, periodical, issue_number) ) if result and len(result) > 0: new_issue",
"ProfileUpdateThread(threading.Thread): def __init__(self, **kwargs): threading.Thread.__init__(self) config = kwargs.get(\"config\") cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.tutt_github",
"to self.__update_fast_subjects__() self.__update_research_statements__() self.__save_graph__( file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=self.statement_msg) self.__save_graph__( file_path =\"/data/cc-fast-subjects.ttl\", graph_name=\"fast_subjects\", message=\"Fast subject",
"message.as_string()) email_server.close() def __add_article__(self, work_iri, work_form): \"\"\"Article specific data added to creative work",
"work_form): \"\"\"Article specific data added to creative work Args: work_iri(rdflib.URIRef): Creative Work IRI",
"__populate_work__(self, work_form, generated_by=None): \"\"\"Populates graph with new work Args: form(Flask.request.form): Dict of form",
"content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') def __save_graph__(self, **kwargs): file_path = kwargs.get(\"file_path\")",
"kwargs.get('config_manager') connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema form = kwargs.get('form')",
"rdflib.Literal(work_form.book_title.data))) if work_form.isbn.data is not None: self.graph.add((work, SCHEMA.isbn, rdflib.Literal(work_form.isbn.data))) if work_form.editionStatement.data is not",
"BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema form = kwargs.get('form') current_user = kwargs.get(\"current_user\") output",
"self.email.get(\"password\")) recipients = list(set(self.recipients)) # Quick dedup email_server.sendmail(self.email.get(\"user\"), recipients, message.as_string()) email_server.close() def __add_article__(self,",
"person_label)) self.__save_graph__( git_repo=self.tiger_repo, file_path=self.current_year_path, graph_name=\"current_year\", message=\"{} person to Department for school year\".format(action)) self.__save_graph__(",
"soup.new_tag(\"a\", **{\"class\": \"btn btn-danger\", \"onclick\": delete_click, \"type=\": \"input\"}) delete_a.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-trash-alt\"})) col_3.append(delete_a)",
"local RDF graph for new profile or editing profile that is send via",
"profile that is send via email to the Administrators for review.\"\"\" def __init__(self,",
"from email.mime.text import MIMEText import mimetypes import click import rdflib import requests from",
"result = connection.datastore.query(\"\"\"SELECT ?issue WHERE {{ ?issue rdf:type schema:issueNumber ; schema:issueNumber ?issue_number .",
"self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue)) self.graph.add((issue, SCHEMA.issueNumber, rdflib.Literal(work_form.issue_number.data))) if volume is not None: self.graph.add((issue, SCHEMA.partOf,",
"self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"):",
"\"\" results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value'))",
"page_end = citation.get(\"page_end\") if page_end and len(page_end) > 0: span = soup.new_tag(\"span\") if",
"connection = config_manager.conns generated_by = kwargs.get(\"generated_by\") work_form = kwargs.get(\"work_form\") BF = config_manager.nsm.bf SCHEMA",
"return {\"message\": \"Deletion of {} for {} under review\".format( iri, author), \"status\": True}",
"rdflib.Literal(work_form.provisionActivityStatement.data))) if work_form.notes.data is not None: self.graph.add( (work, SCHEMA.description, rdflib.Literal(work_form.notes.data))) def __populate_work__(self, work_form,",
"SCHEMA.Chapter)) book_bnode = rdflib.BNode() self.graph.add((work_iri, SCHEMA.partOf, book_bnode)) self.__add_book__(book_bnode, work_form) elif citation_type.startswith(\"book\"): self.__add_book__(work_iri, work_form)",
"SCHEMA.partOf, journal)) if work_form.month.data != None: self.graph.add((work_iri, CITE.month, rdflib.Literal(work_form.month.data))) def __add_book__(self, work, work_form):",
"else: abort(500) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) return work_iri def add(self,",
"config.get(\"EMAIL\")[\"password\"]) recipients = list(set(recipients)) # Quick dedup server.sendmail(sender, recipients, message.as_string()) server.close() def generate_citation_html(citation):",
"PROV) self.email = config.get(\"EMAIL\") self.recipients = config.get(\"ADMINS\") self.person_iri = person_iri def __send_email__(self, subject,",
"def add_creative_work(**kwargs): \"\"\"Calls utilities to populate and save to datastore\"\"\" config = kwargs.get(\"config\")",
"from github import Github, GithubException import utilities from .sparql import EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI",
"if new_description is not None \\ and str(current_description) != str(new_description): self.research_statements.remove((existing_stmt, SCHEMA.description, current_description))",
"citation.get(\"article_title\") elif \"title\" in citation: name = citation.get(\"title\") if \"url\" in citation: work_link",
"self.__add_book__(book_bnode, work_form) elif citation_type.startswith(\"book\"): self.__add_book__(work_iri, work_form) else: abort(500) if work_form.abstract.data != None: self.graph.add((work_iri,",
"'pull', 'origin', 'master']) click.echo(result.returncode, result.stdout) config_mgr.conns.datastore.mgr.reset() class ProfileUpdateThread(threading.Thread): def __init__(self, **kwargs): threading.Thread.__init__(self) config",
"and CC people now = datetime.datetime.utcnow() if now.month < 7: start_year = now.year",
"BeautifulSoup(\"\", 'lxml') div = soup.new_tag(\"div\", **{\"class\": \"row\"}) col_1 = soup.new_tag(\"div\", **{\"class\": \"col-1\"}) citation_type",
"email_results[0].get(\"person\").get('value')) work_iri = rdflib.URIRef(profile.add(work_form, generated_by)) #profile.update(\"Added or Updated Creative Work\") return {\"message\": \"New",
"= soup.new_tag(\"a\", **{\"class\": \"btn btn-warning disabled\", \"onclick\": edit_click, \"type=\": \"input\"}) edit_a.append(soup.new_tag(\"i\", **{\"class\": \"fas",
". OPTIONAL {{ ?issue schema:partOf ?volume . }} OPTIONAL {{ ?issue schema:partOf ?periodical",
"subject=existing_stmt, predicate=SCHEMA.about): new_subjects.add(fast_heading) for subject in list(existing_subjects.difference(new_subjects)): self.research_statements.remove((existing_stmt, SCHEMA.about, subject)) for subject in",
"recipients = config.get(\"ADMINS\") subject = kwargs.get('subject') text = kwargs.get('text') carbon_copy = kwargs.get(\"carbon_copy\", [])",
"GitProfile(config) current_user = kwargs.get(\"current_user\") config_manager = kwargs.get('config_manager') author = kwargs.get(\"author\") connection = config_manager.conns",
"statement_iri, generated_by) citations = form.getlist(\"citations\") for uri in citations: profile.graph.add( (rdflib.URIRef(uri), SCHEMA.author.rdflib, person_iri))",
"self.current_year.parse(data=raw_turtle, format='turtle') if content.name.startswith(\"cc-people\"): self.cc_people_git = content self.cc_people.parse(data=raw_turtle, format='turtle') self.graph_hashes[\"cc_people\"] = hashlib.sha1( self.cc_people.serialize(format='n3')).hexdigest()",
"not None: self.graph.add( (work, SCHEMA.provisionActivityStatement, rdflib.Literal(work_form.provisionActivityStatement.data))) if work_form.notes.data is not None: self.graph.add( (work,",
"\"\"\" self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.ScholarlyArticle)) self.graph.add((work_iri, SCHEMA.name, rdflib.Literal(work_form.article_title.data))) if work_form.page_start.data !=None: self.graph.add((work_iri, SCHEMA.pageStart, rdflib.Literal(work_form.page_start.data)))",
"volume_or_issue result = connection.datastore.query(\"\"\"SELECT ?periodical WHERE {{ ?periodical schema:name ?name . FILTER(CONTAINS(?name, \"{0}\"))",
"SCHEMA = config_manager.nsm.schema form = kwargs.get('form') current_user = kwargs.get(\"current_user\") output = '' person_iri",
"new_description)) def run(self): # Function iterates and commits any changes to self.__update_fast_subjects__() self.__update_research_statements__()",
"self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"creative-works\"):",
"= EMAIL_LOOKUP.format( current_user.data.get('mail').lower()) email_results = connection.datastore.query(sparql) if len(email_results) > 0: generated_by = rdflib.URIRef(",
"= connection.datastore.query(\"\"\"SELECT ?issue WHERE {{ ?issue rdf:type schema:issueNumber ; schema:issueNumber ?issue_number . OPTIONAL",
"self.graph_hashes[\"research_statements\"] = hashlib.sha1( self.research_statements.serialize(format='n3')).hexdigest() self.graph_hashes[\"fast_subjects\"] = hashlib.sha1( self.fast_subjects.serialize(format='n3')).hexdigest() def __get_content__(self, repo_name, content): raw_turtle",
"\"author\" in work_form and len(work_form.author.data) > 0: self.person_iri = rdflib.URIRef(work_form.author.data) self.graph.add((work_iri, SCHEMA.author, self.person_iri))",
"= kwargs.get('config_manager') profile = EmailProfile(config) connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA =",
"iri, author), \"status\": True} def edit_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config) current_user_email",
"rdflib.URIRef(work_form.doi.data) else: work_iri = rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format( uuid.uuid1())) self.graph.add((work_iri, SCHEMA.dataPublished, rdflib.Literal(work_form.datePublished.data))) self.graph.add((work_iri, CITE.authorString, rdflib.Literal(work_form.author_string.data)))",
"if generated_by is None: generated_by = person_iri profile.graph.add( (person_iri, rdflib.RDF.type, BF.Person.rdflib)) given_name =",
"config_manager.nsm.bf SCHEMA = config_manager.nsm.schema form = kwargs.get('form') current_user = kwargs.get(\"current_user\") output = ''",
"\"page_start\" in citation: page_string = \"- {}.\" else: page_string = \"{}.\" span.string =",
"config.get('EMAIL')['tls']: server.starttls() server.ehlo() server.login(sender, config.get(\"EMAIL\")[\"password\"]) recipients = list(set(recipients)) # Quick dedup server.sendmail(sender, recipients,",
"message[\"To\"] = \",\".join([\"<{0}>\".format(r) for r in recipients]) if len(carbon_copy) > 0: message[\"Cc\"] =",
"\",\".join([\"<{0}>\".format(r) for r in self.recipients]) message[\"Subject\"] = subject email_server = smtplib.SMTP( self.email.get(\"host\"), self.email.get(\"port\"))",
"{ ?entity rdf:type schema:Periodical ; schema:name ?label . } \"\"\"): entity, label =",
"review\".format( iri, author), \"status\": True} def edit_creative_work(**kwargs): config = kwargs.get(\"config\") git_profile = GitProfile(config)",
"for creative works, # research statements, and FAST subjects self.creative_works = rdflib.Graph() self.research_statements",
"rdflib.Literal(statement, lang=\"en\"))) add_qualified_generation(profile.graph, statement_iri, generated_by) form_subjects = form.getlist(\"subjects\") new_subjects = {} for row",
"len(email_results) > 0: revised_by = rdflib.URIRef( email_results[0].get(\"person\").get('value')) temp_work = rdflib.Graph() temp_work.namespace_manager.bind(\"cite\", rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")) for",
"True, \"iri\": work_iri} def add_profile(**kwargs): \"\"\"Adds a profile stub to scholarship graph\"\"\" config",
"predicate=SCHEMA.description) new_description = self.profile.graph.value( subject=existing_stmt, predicate=SCHEMA.description) if new_description is not None \\ and",
"\"article_title\" in citation: name = citation.get(\"article_title\") elif \"title\" in citation: name = citation.get(\"title\")",
"lang=\"en\"))) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) add_qualified_generation(profile.graph, statement_iri, generated_by)",
"= config.get(\"ADMINS\") subject = kwargs.get('subject') text = kwargs.get('text') carbon_copy = kwargs.get(\"carbon_copy\", []) message",
"class GitProfile(object): def __init__(self, config): self.graph_hashes = {} cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\")) self.triplestore_url",
"rdflib.Literal(work_form.editor.data))) if work_form.provisionActivityStatement.data is not None: self.graph.add( (work, SCHEMA.provisionActivityStatement, rdflib.Literal(work_form.provisionActivityStatement.data))) if work_form.notes.data is",
"MIMEText(graph_turtle.read()) attachment.add_header('Content-Disposition', 'attachment', filename='profile.ttl') message.attach(attachment) email_server.login( self.email.get(\"user\"), self.email.get(\"password\")) recipients = list(set(self.recipients)) # Quick",
"}}\"\"\".format(label)) if result and len(result) > 0: periodical = result[0].get(\"periodical\").get(\"value\") if periodical !=",
"work_form(Flask.request.form): Dict of form values \"\"\" self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.ScholarlyArticle)) self.graph.add((work_iri, SCHEMA.name, rdflib.Literal(work_form.article_title.data))) if",
"self.profile.graph.objects( subject=existing_stmt, predicate=SCHEMA.about): new_subjects.add(fast_heading) for subject in list(existing_subjects.difference(new_subjects)): self.research_statements.remove((existing_stmt, SCHEMA.about, subject)) for subject",
"message[\"Subject\"] = subject email_server = smtplib.SMTP( self.email.get(\"host\"), self.email.get(\"port\")) email_server.ehlo() if self.email.get(\"tls\"): email_server.starttls() body",
"chapter\"): self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter)) book_bnode = rdflib.BNode() self.graph.add((work_iri, SCHEMA.partOf, book_bnode)) self.__add_book__(book_bnode, work_form) elif",
"= work_graph.value(predicate=SCHEMA.partOf, object=entity) schema_class = work_graph.value(subject=volume_or_issue, predicate=rdflib.RDF.type) if schema_class is SCHEMA.volumeNumber: volume =",
"config_mgr.conns.datastore.mgr.reset() class ProfileUpdateThread(threading.Thread): def __init__(self, **kwargs): threading.Thread.__init__(self) config = kwargs.get(\"config\") cc_github = Github(config.get(\"GITHUB_USER\"),",
"= kwargs.get(\"citation\") work_type = kwargs.get(\"work_type\", \"article\") if revised_by is None and current_user_email: sparql",
"row in config_mgr.get(\"CONNECTIONS\"): if row.get(\"name\").startswith(\"datastore\"): for directory_row in row.get(\"data_upload\"): data_upload.append(directory_row[1]) # Pull in",
"col_1.append(under_review) div.append(col_1) col_2 = soup.new_tag(\"div\", **{\"class\": \"col-7\"}) if \"article_title\" in citation: name =",
"= content self.current_year.parse(data=raw_turtle, format='turtle') if content.name.startswith(\"cc-people\"): self.cc_people_git = content self.cc_people.parse(data=raw_turtle, format='turtle') self.graph_hashes[\"cc_people\"] =",
"retrieving and parsing latest RDF for current academic year # and CC people",
"rdflib.Literal(work_form.page_start.data))) if work_form.page_end.data !=None: self.graph.add((work_iri, SCHEMA.pageEnd, rdflib.Literal(work_form.page_end.data))) journal = rdflib.BNode() self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical))",
"in each repository for directory in data_upload: os.chdir(directory) result = subprocess.run(['git', 'pull', 'origin',",
"= kwargs.get(\"message\", \"Updating {}\".format(graph_name)) graph = getattr(self, graph_name) graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest() if graph_sha1",
"SCHEMA.email.rdflib, rdflib.Literal(email))) add_qualified_generation(profile.graph, person_iri, generated_by) dept_year = kwargs.get(\"year-iri\") if dept_year is not None:",
"else: self.graph.add((issue, SCHEMA.partOf, journal)) self.graph.add((work_iri, SCHEMA.partOf, issue)) elif volume is not None: self.graph.add((work_iri,",
"format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"creative-works\"): self.creative_works_git =",
"elif citation_type.startswith(\"book chapter\"): self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter)) book_bnode = rdflib.BNode() self.graph.add((work_iri, SCHEMA.partOf, book_bnode)) self.__add_book__(book_bnode,",
"kwargs.get('config_manager') profile = EmailProfile(config) connection = config_manager.conns BF = config_manager.nsm.bf SCHEMA = config_manager.nsm.schema",
"if len(statement_iri_results) > 0: statement_iri = rdflib.URIRef( statement_iri_results[0].get(\"iri\").get(\"value\")) add_qualified_revision(profile.graph, statement_iri, generated_by) else: statement_iri",
"# Add work_iri to Journal as last resort self.graph.add((work_iri, SCHEMA.partOf, journal)) if work_form.month.data",
"for review before adding to production. Keyword args: work_graph(rdflib.Graph): RDF Graph of Citation",
"BF.Person.rdflib)) given_name = form.get(\"given_name\") if given_name is not None: profile.graph.add( (person_iri, SCHEMA.givenName.rdflib, rdflib.Literal(given_name,",
"#profile.update(\"Added or Updated Creative Work\") return {\"message\": \"New work has been submitted for",
"> 0: profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) form_subjects = form.getlist(\"subjects\") new_subjects = {}",
"schema:partOf ?work ; schema:volumeNumber ?volumeNumber . BIND(<{0}> as ?work) BIND(\"{1}\" as ?volumeNumber) }}\"\"\".format(entity,",
"connection = config_manager.conns iri = kwargs.get(\"iri\") __email_work__( config=config, carbon_copy=[current_user.data.get('mail'),], subject=\"Delete Request\", text=\"Delete citation",
"row in work_form._fields: if row.startswith(\"csrf_token\"): continue field = getattr(work_form, row) email_body += \"\\n{}:\\t{}\".format(row,",
"data=raw_turtle, format='turtle') self.graph_hashes[\"creative_works\"] = hashlib.sha1( self.creative_works.serialize(format='n3')).hexdigest() self.graph_hashes[\"research_statements\"] = hashlib.sha1( self.research_statements.serialize(format='n3')).hexdigest() self.graph_hashes[\"fast_subjects\"] = hashlib.sha1(",
"message[\"Cc\"] = ','.join(carbon_copy) recipients.extend(carbon_copy) body = MIMEText(text, _charset=\"UTF-8\") message.attach(body) if work_graph: work_turtle =",
"MIMEText(text, _charset=\"UTF-8\") message.attach(body) if work_graph: work_turtle = io.StringIO( work_graph.serialize(format='turtle').decode()) attachment = MIMEText(work_turtle.read()) attachment.add_header('Content-Disposition',",
"kwargs.get(\"config\") git_profile = GitProfile(config) current_user_email = kwargs.get(\"current_user_email\") config_manager = kwargs.get('config_manager') connection = config_manager.conns",
"SCHEMA.url, rdflib.URIRef(work_form.url.data))) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data))) if citation_type.startswith(\"article\"): self.__add_article__(work_iri, work_form)",
"?entity rdf:type schema:Periodical ; schema:name ?label . } \"\"\"): entity, label = row",
"= citation.get(\"ENTRYTYPE\") if citation_type.startswith(\"article\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas fa-file-alt\"})) elif citation_type.endswith(\"book\"): col_1.append(soup.new_tag(\"i\", **{\"class\": \"fas",
"self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"): raw_turtle = self.__get_content__(\"tiger_repo\", content) if content.name.startswith(self.current_year_path.split(\"/\")[-1]): self.current_year_git = content self.current_year.parse(data=raw_turtle, format='turtle') if",
"{{ ?issue schema:partOf ?periodical . }} BIND(<{0}> as ?volume) BIND(<{1}> as ?periodical) BIND(\"{2}\"",
"= rdflib.URIRef(form.get(\"iri\")) profile = EmailProfile(config_manager, person_iri) msg = \"\" results = connection.datastore.query( EMAIL_LOOKUP.format(",
"class ProfileUpdateThread(threading.Thread): def __init__(self, **kwargs): threading.Thread.__init__(self) config = kwargs.get(\"config\") cc_github = Github(config.get(\"GITHUB_USER\"), config.get(\"GITHUB_PWD\"))",
"= rdflib.URIRef(result[0].get(\"issue\").get(\"value\")) bibcat.replace_iri(work_graph, issue, new_issue) def add_creative_work(**kwargs): \"\"\"Calls utilities to populate and save",
"rdflib.Graph() self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): raw_turtle = self.__get_content__(\"scholarship_repo\", content) if",
"in the latest changes in each repository for directory in data_upload: os.chdir(directory) result",
"len(result) > 0: new_volume = rdflib.URIRef(result[0].get(\"volume\").get(\"value\")) bibcat.replace_iri(work_graph, volume, new_volume) if issue is not",
"work has been submitted for review\", \"status\": True, \"iri\": work_iri} def add_profile(**kwargs): \"\"\"Adds",
"field = getattr(work_form, row) email_body += \"\\n{}:\\t{}\".format(row, field.data) self.__send_email__(\"Added New Work\", email_body) return",
"\"\"\"SELECT ?entity ?label WHERE { ?entity rdf:type schema:Periodical ; schema:name ?label . }",
"subprocess.run(['git', 'pull', 'origin', 'master']) click.echo(result.returncode, result.stdout) config_mgr.conns.datastore.mgr.reset() class ProfileUpdateThread(threading.Thread): def __init__(self, **kwargs): threading.Thread.__init__(self)",
"col_2.append(span) issue_number = citation.get(\"issue_number\") if issue_number and len(issue_number ) > 0: span =",
"?periodical) BIND(\"{2}\" as ?issue_number) }}\"\"\".format(volume, periodical, issue_number) ) if result and len(result) >",
"to production. Keyword args: work_graph(rdflib.Graph): RDF Graph of Citation config: Configuration includes logins",
"config, person_iri): self.config = config self.triplestore_url = self.config.get(\"TRIPLESTORE_URL\") self.graph = rdflib.Graph() self.graph.namespace_manager.bind(\"bf\", BF)",
"profile.graph.add( (statement_iri, rdflib.RDFS.label, rdflib.Literal(\"Research Statement for {} {}\".format( form.get('given_name'), form.get('family_name')), lang=\"en\"))) add_qualified_generation( profile.graph,",
"in citations: profile.graph.add( (rdflib.URIRef(uri), SCHEMA.author.rdflib, person_iri)) statement = form.get(\"research_stmt\") if len(statement) > 0:",
"content.decoded_content except GithubException: blob = self.scholarship_repo.get_git_blob(content.sha) raw_turtle = base64.b64decode(blob.content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git =",
"attachment.add_header('Content-Disposition', 'attachment', filename='profile.ttl') message.attach(attachment) email_server.login( self.email.get(\"user\"), self.email.get(\"password\")) recipients = list(set(self.recipients)) # Quick dedup",
"self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical)) self.graph.add((journal, SCHEMA.name, rdflib.Literal(work_form.journal_title.data))) issue, volume = None, None if work_form.volume_number.data",
"statements and fast subjects self.research_statements.add((existing_stmt, SCHEMA.about, subject)) self.fast_subjects.add((subject, rdflib.RDF.type, BF.Topic)) subject_label = self.profile.graph.value(subject=subject,",
"rdflib.URIRef(fast_uri) profile.graph.add( (statement_iri, SCHEMA.about.rdflib, iri_subject)) profile.graph.add( (iri_subject, rdflib.RDF.type, BF.Topic.rdflib)) profile.graph.add( (iri_subject, rdflib.RDFS.label, rdflib.Literal(fast_label,",
"is SCHEMA.volumeNumber: volume = volume_or_issue issue = work_graph.value(predicate=SCHEMA.partOf, object=volume) elif schema_class is SCHEMA.issueNumber:",
"to the Administrators for review.\"\"\" def __init__(self, config, person_iri): self.config = config self.triplestore_url",
"and Values for Creative Work {}\".format(work_iri) for row in work_form._fields: if row.startswith(\"csrf_token\"): continue",
"__reconcile_article__(work_graph, connection): SCHEMA = rdflib.Namespace(\"http://schema.org/\") for row in work_graph.query( \"\"\"SELECT ?entity ?label WHERE",
"file_path=\"/KnowledgeGraph/cc-people.ttl\", graph_name=\"cc_people\", message=\"{} {} to CC People\".format(action, person_label)) self.__save_graph__( git_repo=self.tiger_repo, file_path=self.current_year_path, graph_name=\"current_year\", message=\"{}",
"namespace) if work_type.startswith(\"article\"): citation = utilities.Article_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate() citation.populate_article() citation.add_article() elif",
"EMAIL_LOOKUP.format( current_user_email.lower()) email_results = connection.datastore.query(sparql) if len(email_results) > 0: revised_by = rdflib.URIRef( email_results[0].get(\"person\").get('value'))",
"not None: self.graph.add( (work, SCHEMA.description, rdflib.Literal(work_form.notes.data))) def __populate_work__(self, work_form, generated_by=None): \"\"\"Populates graph with",
"0: new_volume = rdflib.URIRef(result[0].get(\"volume\").get(\"value\")) bibcat.replace_iri(work_graph, volume, new_volume) if issue is not None: issue_number",
"if graph_sha1 == self.graph_hashes[graph_name]: return git_graph = getattr(self, \"{}_git\".format(graph_name)) if branch: git_repo.update_file(file_path, message,",
"kwargs.get(\"config\") sender = config.get('EMAIL')['user'] recipients = config.get(\"ADMINS\") subject = kwargs.get('subject') text = kwargs.get('text')",
"for review\", \"status\": True} def update_profile(**kwargs): \"\"\"Updates existing triples based on form values\"\"\"",
"\"\"\"Updates existing triples based on form values\"\"\" config_manager = kwargs.get('config_manager') connection = config_manager.conns",
"self.graph.add((work_iri, CITE.month, rdflib.Literal(work_form.month.data))) def __add_book__(self, work, work_form): self.graph.add((work, rdflib.RDF.type, SCHEMA.Book)) self.graph.add((work, SCHEMA.title, rdflib.Literal(work_form.book_title.data)))",
"new_work if volume is not None: vol_num = work_graph.value(subject=volume, predicate=SCHEMA.volumeNumber) result = connection.datastore.query(\"\"\"SELECT",
"self.email.get(\"user\"), self.email.get(\"password\")) recipients = list(set(self.recipients)) # Quick dedup email_server.sendmail(self.email.get(\"user\"), recipients, message.as_string()) email_server.close() def",
"None: self.graph.add((work_iri, CITE.month, rdflib.Literal(work_form.month.data))) def __add_book__(self, work, work_form): self.graph.add((work, rdflib.RDF.type, SCHEMA.Book)) self.graph.add((work, SCHEMA.title,",
"config_manager = kwargs.get('config_manager') author = kwargs.get(\"author\") connection = config_manager.conns iri = kwargs.get(\"iri\") __email_work__(",
"self.graph.add((work_iri, SCHEMA.partOf, book_bnode)) self.__add_book__(book_bnode, work_form) elif citation_type.startswith(\"book\"): self.__add_book__(work_iri, work_form) else: abort(500) if work_form.abstract.data",
"for directory in data_upload: os.chdir(directory) result = subprocess.run(['git', 'pull', 'origin', 'master']) click.echo(result.returncode, result.stdout)",
"work_iri = self.__populate_work__(work_form, generated_by) email_body = \"Properties and Values for Creative Work {}\".format(work_iri)",
"= form.getlist(\"citations\") for uri in citations: profile.graph.add( (rdflib.URIRef(uri), SCHEMA.author.rdflib, person_iri)) statement = form.get(\"research_stmt\")",
"git_repo = kwargs.get(\"git_repo\") file_path = kwargs.get(\"file_path\") graph_name = kwargs.get(\"graph_name\") branch = kwargs.get(\"branch\") message",
"self.graph.namespace_manager.bind(\"bf\", BF) self.graph.namespace_manager.bind(\"cite\", CITE) self.graph.namespace_manager.bind(\"schema\", SCHEMA) self.graph.namespace_manager.bind(\"prov\", PROV) self.email = config.get(\"EMAIL\") self.recipients =",
"a profile stub to scholarship graph\"\"\" config = kwargs.get(\"config\") current_user = kwargs.get(\"current_user\") config_manager",
"git_graph.sha) def update_all(self, person_label, action=\"Add\", connection=None): self.__save_graph__( git_repo=self.tiger_repo, file_path=\"/KnowledgeGraph/cc-people.ttl\", graph_name=\"cc_people\", message=\"{} {} to",
"= kwargs.get(\"carbon_copy\", []) message = MIMEMultipart() message[\"From\"] = sender message[\"Subject\"] = subject message[\"To\"]",
"git_graph.sha) def __update_fast_subjects__(self): existing_subjects, new_subjects = set(), set() existing_stmt = self.research_statements.value( predicate=SCHEMA.accountablePerson, object=self.person_iri)",
"import MIMEMultipart from email.mime.text import MIMEText import mimetypes import click import rdflib import",
"; schema:issueNumber ?issue_number . OPTIONAL {{ ?issue schema:partOf ?volume . }} OPTIONAL {{",
"existing_subjects.add(row) for fast_heading in self.profile.graph.objects( subject=existing_stmt, predicate=SCHEMA.about): new_subjects.add(fast_heading) for subject in list(existing_subjects.difference(new_subjects)): self.research_statements.remove((existing_stmt,",
"if len(work_form.iri.data) > 0: work_iri = rdflib.URIRef(work_form.iri.data) else: # Mint IRI for new",
"smtplib import subprocess import threading import uuid from email.mime.multipart import MIMEMultipart from email.mime.text",
"branch=branch) else: git_repo.update_file(file_path, message, graph.serialize(format='turtle'), git_graph.sha) def update_all(self, person_label, action=\"Add\", connection=None): self.__save_graph__( git_repo=self.tiger_repo,",
"if result and len(result) > 0: periodical = result[0].get(\"periodical\").get(\"value\") if periodical != str(entity):",
"rdflib.URIRef(results[0].get(\"person\").get('value')) else: generated_by = person_iri msg = \"{} made the following changes to",
"__email_work__( config=config, carbon_copy=[current_user.data.get('mail'),], subject=\"Delete Request\", text=\"Delete citation {} for {}\\nrequested by {} on",
"profile.graph.add( (rdflib.URIRef(uri), SCHEMA.author.rdflib, person_iri)) statement = form.get(\"research_stmt\") if len(statement) > 0: profile.graph.add( (statement_iri,",
"= volume_or_issue issue = work_graph.value(predicate=SCHEMA.partOf, object=volume) elif schema_class is SCHEMA.issueNumber: issue = volume_or_issue",
"rdflib.URIRef( \"http://catalog.coloradocollege.edu/{}\".format(uuid.uuid1())) profile.graph.add( (statement_iri, rdflib.RDF.type, SCHEMA.DigitalDocument.rdflib)) profile.graph.add( (statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, rdflib.RDFS.label,",
"periodical != str(entity): new_work = rdflib.URIRef(periodical) bibcat.replace_iri(work_graph, entity, new_work) entity = new_work if",
"predicate=rdflib.RDFS.label) if subject_label is not None: self.fast_subjects.add((subject, rdflib.RDFS.label, subject_label)) def __update_research_statements__(self): existing_stmt =",
"email_server.login( self.email.get(\"user\"), self.email.get(\"password\")) recipients = list(set(self.recipients)) # Quick dedup email_server.sendmail(self.email.get(\"user\"), recipients, message.as_string()) email_server.close()",
"that is send via email to the Administrators for review.\"\"\" def __init__(self, config,",
"(statement_iri, SCHEMA.accountablePerson.rdflib, person_iri)) profile.graph.add( (statement_iri, SCHEMA.description.rdflib, rdflib.Literal(statement, lang=\"en\"))) add_qualified_generation(profile.graph, statement_iri, generated_by) form_subjects =",
"= kwargs.get(\"profile\") self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in self.scholarship_repo.get_dir_contents(\"/data/\"): try: raw_turtle = content.decoded_content",
"= self.tutt_github.get_repo(\"tiger-catalog\") for content in self.tiger_repo.get_dir_contents(\"/KnowledgeGraph/\"): raw_turtle = self.__get_content__(\"tiger_repo\", content) if content.name.startswith(self.current_year_path.split(\"/\")[-1]): self.current_year_git",
"msg = \"\" results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0: generated_by",
"_charset=\"UTF-8\") message.attach(body) if work_graph: work_turtle = io.StringIO( work_graph.serialize(format='turtle').decode()) attachment = MIMEText(work_turtle.read()) attachment.add_header('Content-Disposition', 'attachment',",
"SCHEMA.description, rdflib.Literal(work_form.notes.data))) def __populate_work__(self, work_form, generated_by=None): \"\"\"Populates graph with new work Args: form(Flask.request.form):",
"revised_by, datetime.datetime.utcnow().isoformat()) ) return {\"message\": \"Changes to work has been submitted for review\",",
"smtplib.SMTP( self.email.get(\"host\"), self.email.get(\"port\")) email_server.ehlo() if self.email.get(\"tls\"): email_server.starttls() body = MIMEText(body, _charset=\"UTF-8\") message.attach(body) graph_turtle",
"SCHEMA.issueNumber, rdflib.Literal(work_form.issue_number.data))) if volume is not None: self.graph.add((issue, SCHEMA.partOf, volume)) else: self.graph.add((issue, SCHEMA.partOf,",
"None: self.graph.add( (work, SCHEMA.provisionActivityStatement, rdflib.Literal(work_form.provisionActivityStatement.data))) if work_form.notes.data is not None: self.graph.add( (work, SCHEMA.description,",
"resort self.graph.add((work_iri, SCHEMA.partOf, journal)) if work_form.month.data != None: self.graph.add((work_iri, CITE.month, rdflib.Literal(work_form.month.data))) def __add_book__(self,",
"schema_class = work_graph.value(subject=volume_or_issue, predicate=rdflib.RDF.type) if schema_class is SCHEMA.volumeNumber: volume = volume_or_issue issue =",
"profile.graph.add( (person_iri, rdflib.RDF.type, BF.Person.rdflib)) given_name = form.get(\"given_name\") if given_name is not None: profile.graph.add(",
"self.email.get(\"tls\"): email_server.starttls() body = MIMEText(body, _charset=\"UTF-8\") message.attach(body) graph_turtle = io.StringIO( self.graph.serialize(format='turtle').decode()) attachment =",
"content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git = content self.fast_subjects.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"creative-works\"): self.creative_works_git = content self.creative_works.parse(",
"statement_iri_results = connection.datastore.query( RESEARCH_STMT_IRI.format( person_iri)) if len(statement_iri_results) > 0: statement_iri = rdflib.URIRef( statement_iri_results[0].get(\"iri\").get(\"value\"))",
"elif citation_type.startswith(\"book\"): self.__add_book__(work_iri, work_form) else: abort(500) if work_form.abstract.data != None: self.graph.add((work_iri, SCHEMA.about, rdflib.Literal(work_form.abstract.data)))",
"; schema:volumeNumber ?volumeNumber . BIND(<{0}> as ?work) BIND(\"{1}\" as ?volumeNumber) }}\"\"\".format(entity, vol_num)) if",
"rdflib.Graph() self.research_statements = rdflib.Graph() self.fast_subjects = rdflib.Graph() self.scholarship_repo = self.tutt_github.get_repo(\"cc-scholarship-graph\") for content in",
"if fast_id.startswith(\"http\"): fast_uri = fast_id else: fast_uri = \"http://id.worldcat.org/fast/{}\".format(fast_id[3:]) iri_subject = rdflib.URIRef(fast_uri) profile.graph.add(",
"data_upload.append(directory_row[1]) # Pull in the latest changes in each repository for directory in",
"new_volume) if issue is not None: issue_number = work_graph.value(subject=issue, predicate=SCHEMA.issueNumber) result = connection.datastore.query(\"\"\"SELECT",
"uuid.uuid1()) person_iri = rdflib.URIRef(person_uri) if generated_by is None: generated_by = person_iri profile.graph.add( (person_iri,",
"\"article\") if revised_by is None and current_user_email: sparql = EMAIL_LOOKUP.format( current_user_email.lower()) email_results =",
"False) citation.populate() citation.populate_article() citation.add_article() elif work_type.startswith(\"book\"): citation = utilities.Book_Citation(raw_citation, temp_work, git_profile.cc_people, False) citation.populate()",
"base64.b64decode(blob.content) if content.name.startswith(\"cc-research-statements\"): self.research_statements_git = content self.research_statements.parse( data=raw_turtle, format='turtle') if content.name.startswith(\"cc-fast-subjects\"): self.fast_subjects_git =",
"Add work_iri to Journal as last resort self.graph.add((work_iri, SCHEMA.partOf, journal)) if work_form.month.data !=",
"\"\"\" work_graph = kwargs.get(\"graph\") config = kwargs.get(\"config\") sender = config.get('EMAIL')['user'] recipients = config.get(\"ADMINS\")",
"if volume is not None: self.graph.add((issue, SCHEMA.partOf, volume)) else: self.graph.add((issue, SCHEMA.partOf, journal)) self.graph.add((work_iri,",
"for directory_row in row.get(\"data_upload\"): data_upload.append(directory_row[1]) # Pull in the latest changes in each",
"None \\ and str(current_description) != str(new_description): self.research_statements.remove((existing_stmt, SCHEMA.description, current_description)) self.research_statements.replace('\"','\\\"') self.research_statements.add((existing_stmt, SCHEMA.description, new_description))",
"if now.month < 7: start_year = now.year - 1 end_year = now.year else:",
"form.get(\"email\") profile.graph.add((person_iri, SCHEMA.email.rdflib, rdflib.Literal(email))) add_qualified_generation(profile.graph, person_iri, generated_by) dept_year = kwargs.get(\"year-iri\") if dept_year is",
"import subprocess import threading import uuid from email.mime.multipart import MIMEMultipart from email.mime.text import",
"config_manager.nsm.schema results = connection.datastore.query( EMAIL_LOOKUP.format( current_user.data.get('mail').lower())) if len(results) > 0: generated_by = rdflib.URIRef(results[0].get(\"person\").get('value'))",
"self.graph.namespace_manager.bind(\"schema\", SCHEMA) self.graph.namespace_manager.bind(\"prov\", PROV) self.email = config.get(\"EMAIL\") self.recipients = config.get(\"ADMINS\") self.person_iri = person_iri",
"citations: profile.graph.add( (rdflib.URIRef(uri), SCHEMA.author.rdflib, person_iri)) statement = form.get(\"research_stmt\") if len(statement) > 0: profile.graph.add(",
"work_form and len(work_form.url.data) > 0: self.graph.add((work_iri, SCHEMA.url, rdflib.URIRef(work_form.url.data))) if work_form.abstract.data != None: self.graph.add((work_iri,",
"SCHEMA.title, rdflib.Literal(work_form.book_title.data))) if work_form.isbn.data is not None: self.graph.add((work, SCHEMA.isbn, rdflib.Literal(work_form.isbn.data))) if work_form.editionStatement.data is",
"= GitProfile(config) current_user_email = kwargs.get(\"current_user_email\") config_manager = kwargs.get('config_manager') connection = config_manager.conns revised_by =",
"from .sparql import EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI from .sparql import add_qualified_generation, add_qualified_revision BF =",
"kwargs.get(\"revised_by\") raw_citation = kwargs.get(\"citation\") work_type = kwargs.get(\"work_type\", \"article\") if revised_by is None and",
"= rdflib.Graph() temp_work.namespace_manager.bind(\"cite\", rdflib.Namespace(\"https://www.coloradocollege.edu/library/ns/citation/\")) for prefix, namespace in git_profile.cc_people.namespaces(): temp_work.namespace_manager.bind(prefix, namespace) if work_type.startswith(\"article\"):",
"= connection.datastore.query( RESEARCH_STMT_IRI.format( person_iri)) if len(statement_iri_results) > 0: statement_iri = rdflib.URIRef( statement_iri_results[0].get(\"iri\").get(\"value\")) add_qualified_revision(profile.graph,",
"git_repo=self.scholarship_repo, file_path=\"/data/cc-research-statements.ttl\", graph_name=\"research_statements\", message=\"{} Research Statement for {}\".format( action, person_label)) self.__save_graph__( git_repo=self.scholarship_repo, file_path",
"kwargs.get(\"graph\") config = kwargs.get(\"config\") sender = config.get('EMAIL')['user'] recipients = config.get(\"ADMINS\") subject = kwargs.get('subject')",
"title = kwargs.get(\"title-iri\") profile.graph.add( (dept_year_iri, rdflib.URIRef(title), person_iri)) statement = kwargs.get(\"statement\", form.get(\"research_stmt\")) if statement",
"Profile\", message) def __email_work__(**kwargs): \"\"\"Function takes a work graph and configuration and emails",
"SCHEMA.partOf, journal)) self.graph.add((work_iri, SCHEMA.partOf, issue)) elif volume is not None: self.graph.add((work_iri, SCHEMA.partOf, volume))",
"in citation: work_link = soup.new_tag(\"a\", href=citation.get(\"url\")) work_link.string = name col_2.append(work_link) else: span =",
"None: dept_year_iri = rdflib.URIRef(dept_year_iri) title = kwargs.get(\"title-iri\") profile.graph.add( (dept_year_iri, rdflib.URIRef(title), person_iri)) statement =",
"import datetime import hashlib import io import os import pprint import smtplib import",
"= kwargs.get('config_manager') connection = config_manager.conns revised_by = kwargs.get(\"revised_by\") raw_citation = kwargs.get(\"citation\") work_type =",
"statement_iri, generated_by) form_subjects = form.getlist(\"subjects\") new_subjects = {} for row in form_subjects: fast_id,",
"current_app from github import Github, GithubException import utilities from .sparql import EMAIL_LOOKUP, SUBJECTS_IRI,"
] |
[
"np.random.permutation(N) X = Xi[:,r[:N//2]] y = yi[:,r[:N//2]] Xts =Xi[:,r[N//2:]] yts =yi[:,r[N//2:]] print(X.shape, y.shape,Xts.shape,yts.shape)",
"Network from scrach @Author _ <NAME> PhD Student at Queen Mary University of",
"of Genova Conact _ http://nikeshbajaj.in n[dot]<EMAIL> bajaj[dot]<EMAIL> ''' import numpy as np import",
"print(X.shape, y.shape,Xts.shape,yts.shape) NN = deepNet(X,y,Xts=Xts, yts=yts, Net = [8,8,5],NetAf =['tanh'], alpha=0.01,miniBatchSize = 0.3,",
"matplotlib.pyplot as plt from DeepNet import deepNet import DataSet as ds plt.close('all') Xi,",
"nClasses = 4,var =0.25,ShowPlot=False) [n,N] =Xi.shape r = np.random.permutation(N) X = Xi[:,r[:N//2]] y",
"[8,8,5],NetAf =['tanh'], alpha=0.01,miniBatchSize = 0.3, printCostAt =-1,AdamOpt=True,lambd=0,keepProb =[1.0]) plt.ion() for i in range(10):",
"=Xi[:,r[N//2:]] yts =yi[:,r[N//2:]] print(X.shape, y.shape,Xts.shape,yts.shape) NN = deepNet(X,y,Xts=Xts, yts=yts, Net = [8,8,5],NetAf =['tanh'],",
"yts =yi[:,r[N//2:]] print(X.shape, y.shape,Xts.shape,yts.shape) NN = deepNet(X,y,Xts=Xts, yts=yts, Net = [8,8,5],NetAf =['tanh'], alpha=0.01,miniBatchSize",
"range(10): NN.fit(itr=10) NN.PlotLCurve() NN.PlotBoundries(Layers=True) NN.PlotLCurve() NN.PlotBoundries(Layers=True) print(NN) yi,yp = NN.predict(X) yti,ytp = NN.predict(Xts)",
"@Author _ <NAME> PhD Student at Queen Mary University of London & University",
"plt.close('all') Xi, yi = ds.mclassGaus(N=500, nClasses = 4,var =0.25,ShowPlot=False) [n,N] =Xi.shape r =",
"_ <NAME> PhD Student at Queen Mary University of London & University of",
"= [8,8,5],NetAf =['tanh'], alpha=0.01,miniBatchSize = 0.3, printCostAt =-1,AdamOpt=True,lambd=0,keepProb =[1.0]) plt.ion() for i in",
"= np.random.permutation(N) X = Xi[:,r[:N//2]] y = yi[:,r[:N//2]] Xts =Xi[:,r[N//2:]] yts =yi[:,r[N//2:]] print(X.shape,",
"Xts =Xi[:,r[N//2:]] yts =yi[:,r[N//2:]] print(X.shape, y.shape,Xts.shape,yts.shape) NN = deepNet(X,y,Xts=Xts, yts=yts, Net = [8,8,5],NetAf",
"i in range(10): NN.fit(itr=10) NN.PlotLCurve() NN.PlotBoundries(Layers=True) NN.PlotLCurve() NN.PlotBoundries(Layers=True) print(NN) yi,yp = NN.predict(X) yti,ytp",
"NN.fit(itr=10) NN.PlotLCurve() NN.PlotBoundries(Layers=True) NN.PlotLCurve() NN.PlotBoundries(Layers=True) print(NN) yi,yp = NN.predict(X) yti,ytp = NN.predict(Xts) print('Accuracy:::",
"=-1,AdamOpt=True,lambd=0,keepProb =[1.0]) plt.ion() for i in range(10): NN.fit(itr=10) NN.PlotLCurve() NN.PlotBoundries(Layers=True) NN.PlotLCurve() NN.PlotBoundries(Layers=True) print(NN)",
"alpha=0.01,miniBatchSize = 0.3, printCostAt =-1,AdamOpt=True,lambd=0,keepProb =[1.0]) plt.ion() for i in range(10): NN.fit(itr=10) NN.PlotLCurve()",
"import numpy as np import matplotlib.pyplot as plt from DeepNet import deepNet import",
"plt.ion() for i in range(10): NN.fit(itr=10) NN.PlotLCurve() NN.PlotBoundries(Layers=True) NN.PlotLCurve() NN.PlotBoundries(Layers=True) print(NN) yi,yp =",
"http://nikeshbajaj.in n[dot]<EMAIL> bajaj[dot]<EMAIL> ''' import numpy as np import matplotlib.pyplot as plt from",
"as plt from DeepNet import deepNet import DataSet as ds plt.close('all') Xi, yi",
"<NAME> PhD Student at Queen Mary University of London & University of Genova",
"Neural Network from scrach @Author _ <NAME> PhD Student at Queen Mary University",
"[n,N] =Xi.shape r = np.random.permutation(N) X = Xi[:,r[:N//2]] y = yi[:,r[:N//2]] Xts =Xi[:,r[N//2:]]",
"ds.mclassGaus(N=500, nClasses = 4,var =0.25,ShowPlot=False) [n,N] =Xi.shape r = np.random.permutation(N) X = Xi[:,r[:N//2]]",
"scrach @Author _ <NAME> PhD Student at Queen Mary University of London &",
"Mary University of London & University of Genova Conact _ http://nikeshbajaj.in n[dot]<EMAIL> bajaj[dot]<EMAIL>",
"=[1.0]) plt.ion() for i in range(10): NN.fit(itr=10) NN.PlotLCurve() NN.PlotBoundries(Layers=True) NN.PlotLCurve() NN.PlotBoundries(Layers=True) print(NN) yi,yp",
"for i in range(10): NN.fit(itr=10) NN.PlotLCurve() NN.PlotBoundries(Layers=True) NN.PlotLCurve() NN.PlotBoundries(Layers=True) print(NN) yi,yp = NN.predict(X)",
"Queen Mary University of London & University of Genova Conact _ http://nikeshbajaj.in n[dot]<EMAIL>",
"yts=yts, Net = [8,8,5],NetAf =['tanh'], alpha=0.01,miniBatchSize = 0.3, printCostAt =-1,AdamOpt=True,lambd=0,keepProb =[1.0]) plt.ion() for",
"=Xi.shape r = np.random.permutation(N) X = Xi[:,r[:N//2]] y = yi[:,r[:N//2]] Xts =Xi[:,r[N//2:]] yts",
"np import matplotlib.pyplot as plt from DeepNet import deepNet import DataSet as ds",
"X = Xi[:,r[:N//2]] y = yi[:,r[:N//2]] Xts =Xi[:,r[N//2:]] yts =yi[:,r[N//2:]] print(X.shape, y.shape,Xts.shape,yts.shape) NN",
"from scrach @Author _ <NAME> PhD Student at Queen Mary University of London",
"n[dot]<EMAIL> bajaj[dot]<EMAIL> ''' import numpy as np import matplotlib.pyplot as plt from DeepNet",
"bajaj[dot]<EMAIL> ''' import numpy as np import matplotlib.pyplot as plt from DeepNet import",
"= 4,var =0.25,ShowPlot=False) [n,N] =Xi.shape r = np.random.permutation(N) X = Xi[:,r[:N//2]] y =",
"Xi, yi = ds.mclassGaus(N=500, nClasses = 4,var =0.25,ShowPlot=False) [n,N] =Xi.shape r = np.random.permutation(N)",
"DataSet as ds plt.close('all') Xi, yi = ds.mclassGaus(N=500, nClasses = 4,var =0.25,ShowPlot=False) [n,N]",
"NN.PlotBoundries(Layers=True) NN.PlotLCurve() NN.PlotBoundries(Layers=True) print(NN) yi,yp = NN.predict(X) yti,ytp = NN.predict(Xts) print('Accuracy::: Training :',100*np.sum(yi==y)/yi.shape[1],",
"from DeepNet import deepNet import DataSet as ds plt.close('all') Xi, yi = ds.mclassGaus(N=500,",
"NN.PlotBoundries(Layers=True) print(NN) yi,yp = NN.predict(X) yti,ytp = NN.predict(Xts) print('Accuracy::: Training :',100*np.sum(yi==y)/yi.shape[1], ' Testing",
"deepNet import DataSet as ds plt.close('all') Xi, yi = ds.mclassGaus(N=500, nClasses = 4,var",
"Xi[:,r[:N//2]] y = yi[:,r[:N//2]] Xts =Xi[:,r[N//2:]] yts =yi[:,r[N//2:]] print(X.shape, y.shape,Xts.shape,yts.shape) NN = deepNet(X,y,Xts=Xts,",
"y = yi[:,r[:N//2]] Xts =Xi[:,r[N//2:]] yts =yi[:,r[N//2:]] print(X.shape, y.shape,Xts.shape,yts.shape) NN = deepNet(X,y,Xts=Xts, yts=yts,",
"DeepNet import deepNet import DataSet as ds plt.close('all') Xi, yi = ds.mclassGaus(N=500, nClasses",
"ds plt.close('all') Xi, yi = ds.mclassGaus(N=500, nClasses = 4,var =0.25,ShowPlot=False) [n,N] =Xi.shape r",
"deepNet(X,y,Xts=Xts, yts=yts, Net = [8,8,5],NetAf =['tanh'], alpha=0.01,miniBatchSize = 0.3, printCostAt =-1,AdamOpt=True,lambd=0,keepProb =[1.0]) plt.ion()",
"print(NN) yi,yp = NN.predict(X) yti,ytp = NN.predict(Xts) print('Accuracy::: Training :',100*np.sum(yi==y)/yi.shape[1], ' Testing :',100*np.sum(yti==yts)/yti.shape[1])",
"in range(10): NN.fit(itr=10) NN.PlotLCurve() NN.PlotBoundries(Layers=True) NN.PlotLCurve() NN.PlotBoundries(Layers=True) print(NN) yi,yp = NN.predict(X) yti,ytp =",
"''' import numpy as np import matplotlib.pyplot as plt from DeepNet import deepNet",
"import matplotlib.pyplot as plt from DeepNet import deepNet import DataSet as ds plt.close('all')",
"University of London & University of Genova Conact _ http://nikeshbajaj.in n[dot]<EMAIL> bajaj[dot]<EMAIL> '''",
"= ds.mclassGaus(N=500, nClasses = 4,var =0.25,ShowPlot=False) [n,N] =Xi.shape r = np.random.permutation(N) X =",
"import DataSet as ds plt.close('all') Xi, yi = ds.mclassGaus(N=500, nClasses = 4,var =0.25,ShowPlot=False)",
"London & University of Genova Conact _ http://nikeshbajaj.in n[dot]<EMAIL> bajaj[dot]<EMAIL> ''' import numpy",
"Example 3: Deep Neural Network from scrach @Author _ <NAME> PhD Student at",
"NN.PlotLCurve() NN.PlotBoundries(Layers=True) print(NN) yi,yp = NN.predict(X) yti,ytp = NN.predict(Xts) print('Accuracy::: Training :',100*np.sum(yi==y)/yi.shape[1], '",
"y.shape,Xts.shape,yts.shape) NN = deepNet(X,y,Xts=Xts, yts=yts, Net = [8,8,5],NetAf =['tanh'], alpha=0.01,miniBatchSize = 0.3, printCostAt",
"= deepNet(X,y,Xts=Xts, yts=yts, Net = [8,8,5],NetAf =['tanh'], alpha=0.01,miniBatchSize = 0.3, printCostAt =-1,AdamOpt=True,lambd=0,keepProb =[1.0])",
"0.3, printCostAt =-1,AdamOpt=True,lambd=0,keepProb =[1.0]) plt.ion() for i in range(10): NN.fit(itr=10) NN.PlotLCurve() NN.PlotBoundries(Layers=True) NN.PlotLCurve()",
"Genova Conact _ http://nikeshbajaj.in n[dot]<EMAIL> bajaj[dot]<EMAIL> ''' import numpy as np import matplotlib.pyplot",
"NN = deepNet(X,y,Xts=Xts, yts=yts, Net = [8,8,5],NetAf =['tanh'], alpha=0.01,miniBatchSize = 0.3, printCostAt =-1,AdamOpt=True,lambd=0,keepProb",
"= Xi[:,r[:N//2]] y = yi[:,r[:N//2]] Xts =Xi[:,r[N//2:]] yts =yi[:,r[N//2:]] print(X.shape, y.shape,Xts.shape,yts.shape) NN =",
"Net = [8,8,5],NetAf =['tanh'], alpha=0.01,miniBatchSize = 0.3, printCostAt =-1,AdamOpt=True,lambd=0,keepProb =[1.0]) plt.ion() for i",
"= yi[:,r[:N//2]] Xts =Xi[:,r[N//2:]] yts =yi[:,r[N//2:]] print(X.shape, y.shape,Xts.shape,yts.shape) NN = deepNet(X,y,Xts=Xts, yts=yts, Net",
"import deepNet import DataSet as ds plt.close('all') Xi, yi = ds.mclassGaus(N=500, nClasses =",
"yi[:,r[:N//2]] Xts =Xi[:,r[N//2:]] yts =yi[:,r[N//2:]] print(X.shape, y.shape,Xts.shape,yts.shape) NN = deepNet(X,y,Xts=Xts, yts=yts, Net =",
"as ds plt.close('all') Xi, yi = ds.mclassGaus(N=500, nClasses = 4,var =0.25,ShowPlot=False) [n,N] =Xi.shape",
"at Queen Mary University of London & University of Genova Conact _ http://nikeshbajaj.in",
"as np import matplotlib.pyplot as plt from DeepNet import deepNet import DataSet as",
"Student at Queen Mary University of London & University of Genova Conact _",
"NN.PlotLCurve() NN.PlotBoundries(Layers=True) NN.PlotLCurve() NN.PlotBoundries(Layers=True) print(NN) yi,yp = NN.predict(X) yti,ytp = NN.predict(Xts) print('Accuracy::: Training",
"plt from DeepNet import deepNet import DataSet as ds plt.close('all') Xi, yi =",
"numpy as np import matplotlib.pyplot as plt from DeepNet import deepNet import DataSet",
"PhD Student at Queen Mary University of London & University of Genova Conact",
"Conact _ http://nikeshbajaj.in n[dot]<EMAIL> bajaj[dot]<EMAIL> ''' import numpy as np import matplotlib.pyplot as",
"= 0.3, printCostAt =-1,AdamOpt=True,lambd=0,keepProb =[1.0]) plt.ion() for i in range(10): NN.fit(itr=10) NN.PlotLCurve() NN.PlotBoundries(Layers=True)",
"<reponame>Nikeshbajaj/MachineLearningFromScratch ''' Example 3: Deep Neural Network from scrach @Author _ <NAME> PhD",
"4,var =0.25,ShowPlot=False) [n,N] =Xi.shape r = np.random.permutation(N) X = Xi[:,r[:N//2]] y = yi[:,r[:N//2]]",
"r = np.random.permutation(N) X = Xi[:,r[:N//2]] y = yi[:,r[:N//2]] Xts =Xi[:,r[N//2:]] yts =yi[:,r[N//2:]]",
"=yi[:,r[N//2:]] print(X.shape, y.shape,Xts.shape,yts.shape) NN = deepNet(X,y,Xts=Xts, yts=yts, Net = [8,8,5],NetAf =['tanh'], alpha=0.01,miniBatchSize =",
"of London & University of Genova Conact _ http://nikeshbajaj.in n[dot]<EMAIL> bajaj[dot]<EMAIL> ''' import",
"yi = ds.mclassGaus(N=500, nClasses = 4,var =0.25,ShowPlot=False) [n,N] =Xi.shape r = np.random.permutation(N) X",
"Deep Neural Network from scrach @Author _ <NAME> PhD Student at Queen Mary",
"printCostAt =-1,AdamOpt=True,lambd=0,keepProb =[1.0]) plt.ion() for i in range(10): NN.fit(itr=10) NN.PlotLCurve() NN.PlotBoundries(Layers=True) NN.PlotLCurve() NN.PlotBoundries(Layers=True)",
"''' Example 3: Deep Neural Network from scrach @Author _ <NAME> PhD Student",
"3: Deep Neural Network from scrach @Author _ <NAME> PhD Student at Queen",
"=['tanh'], alpha=0.01,miniBatchSize = 0.3, printCostAt =-1,AdamOpt=True,lambd=0,keepProb =[1.0]) plt.ion() for i in range(10): NN.fit(itr=10)",
"University of Genova Conact _ http://nikeshbajaj.in n[dot]<EMAIL> bajaj[dot]<EMAIL> ''' import numpy as np",
"_ http://nikeshbajaj.in n[dot]<EMAIL> bajaj[dot]<EMAIL> ''' import numpy as np import matplotlib.pyplot as plt",
"& University of Genova Conact _ http://nikeshbajaj.in n[dot]<EMAIL> bajaj[dot]<EMAIL> ''' import numpy as",
"=0.25,ShowPlot=False) [n,N] =Xi.shape r = np.random.permutation(N) X = Xi[:,r[:N//2]] y = yi[:,r[:N//2]] Xts"
] |
[
"= \"../model/CNDetector_5.h5\" converter = tf.lite.TFLiteConverter.from_keras_model_file(modelPath) converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE] lite_model = converter.convert() open(\"../model/CNDetector_Lite_5.tflite\", \"wb\").write(lite_model)",
"as tf modelPath = \"../model/CNDetector_5.h5\" converter = tf.lite.TFLiteConverter.from_keras_model_file(modelPath) converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE] lite_model =",
"mohammedmostafa ''' import tensorflow as tf modelPath = \"../model/CNDetector_5.h5\" converter = tf.lite.TFLiteConverter.from_keras_model_file(modelPath) converter.optimizations",
"23, 2019 @author: mohammedmostafa ''' import tensorflow as tf modelPath = \"../model/CNDetector_5.h5\" converter",
"modelPath = \"../model/CNDetector_5.h5\" converter = tf.lite.TFLiteConverter.from_keras_model_file(modelPath) converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE] lite_model = converter.convert() open(\"../model/CNDetector_Lite_5.tflite\",",
"Dec 23, 2019 @author: mohammedmostafa ''' import tensorflow as tf modelPath = \"../model/CNDetector_5.h5\"",
"2019 @author: mohammedmostafa ''' import tensorflow as tf modelPath = \"../model/CNDetector_5.h5\" converter =",
"tensorflow as tf modelPath = \"../model/CNDetector_5.h5\" converter = tf.lite.TFLiteConverter.from_keras_model_file(modelPath) converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE] lite_model",
"Created on Dec 23, 2019 @author: mohammedmostafa ''' import tensorflow as tf modelPath",
"on Dec 23, 2019 @author: mohammedmostafa ''' import tensorflow as tf modelPath =",
"@author: mohammedmostafa ''' import tensorflow as tf modelPath = \"../model/CNDetector_5.h5\" converter = tf.lite.TFLiteConverter.from_keras_model_file(modelPath)",
"''' import tensorflow as tf modelPath = \"../model/CNDetector_5.h5\" converter = tf.lite.TFLiteConverter.from_keras_model_file(modelPath) converter.optimizations =",
"''' Created on Dec 23, 2019 @author: mohammedmostafa ''' import tensorflow as tf",
"tf modelPath = \"../model/CNDetector_5.h5\" converter = tf.lite.TFLiteConverter.from_keras_model_file(modelPath) converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE] lite_model = converter.convert()",
"import tensorflow as tf modelPath = \"../model/CNDetector_5.h5\" converter = tf.lite.TFLiteConverter.from_keras_model_file(modelPath) converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]"
] |
[
"import jpnn from .lstm_cnn_crf_exp import lstm_cnn_crf from .lstm_crf_exp import lstm_crf from .lstm_dist_exp import",
"jpnn from .lstm_cnn_crf_exp import lstm_cnn_crf from .lstm_crf_exp import lstm_crf from .lstm_dist_exp import lstm_dist",
"lstm_cnn_crf from .lstm_crf_exp import lstm_crf from .lstm_dist_exp import lstm_dist from .lstm_er_exp import lstm_er",
"from .jpnn_exp import jpnn from .lstm_cnn_crf_exp import lstm_cnn_crf from .lstm_crf_exp import lstm_crf from",
".jpnn_exp import jpnn from .lstm_cnn_crf_exp import lstm_cnn_crf from .lstm_crf_exp import lstm_crf from .lstm_dist_exp",
".lstm_cnn_crf_exp import lstm_cnn_crf from .lstm_crf_exp import lstm_crf from .lstm_dist_exp import lstm_dist from .lstm_er_exp",
"from .lstm_cnn_crf_exp import lstm_cnn_crf from .lstm_crf_exp import lstm_crf from .lstm_dist_exp import lstm_dist from",
"import lstm_cnn_crf from .lstm_crf_exp import lstm_crf from .lstm_dist_exp import lstm_dist from .lstm_er_exp import"
] |
[
"datetime.utcnow() # Run the SPoCA jobs try: CH_maps = run_spoca_jobs(start_date, end_date, args.tracked_maps, args.untracked_maps)",
"to the centers file classification_centers_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt' # The frequency to run the",
"program on AIA_wavelengths = [193] def date_range(start, end, step): '''Equivalent to range for",
"in form YYYY-MM-DD') parser.add_argument('--tracked_maps', '-t', metavar = 'MAP', nargs='*', help = 'File paths",
"!= 0: raise JobError(return_code, output, error, job_name = 'get_CH_map', segmented_map = segmented_map) elif",
"# Path to the centers file classification_centers_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt' # The frequency to",
"appropriate parameters job = Job(tracking_exec, *maps, config = tracking_config_file, maxDeltaT = (tracking_overlap *",
"maps = tracked_maps[-tracking_overlap:] + untracked_maps # last color of the previous tracking last_color",
"the AIA files for the classification try: AIA_images = get_AIA_files(date, AIA_wavelengths) except FileNotFoundError",
"have enough untracked maps, we run the tracking program if len(untracked_maps) >= tracking_run_count:",
"maps if untracked_maps: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) return newly_tracked_maps #",
"help = 'Start date of AIA files, in form YYYY-MM-DD') parser.add_argument('--end_date', '-e', default",
"% (date, wavelengths)) else: file_paths.append(file_path) return file_paths def get_HMI_files(date): '''Return a list of",
"the prepped AIA files are located aia_file_pattern = '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits' # Directory where the",
"last_color = int(text.split(':')[1]) except Exception as why: logging.warning('Could not read tracking color from",
"%s with bad quality: %s', file_path, get_quality_errors(quality)) def get_AIA_files(date, wavelengths): '''Return a list",
"None, untracked_maps = None): '''Run the SPoCA jobs to create and track the",
"try: with open(tracking_color_file, 'tw') as f: f.write(output) except Exception as why: logging.error('Could not",
"Run the get_CH_map job return_code, output, error = job() # Check if the",
"and wavelength %s was not found' % (date, wavelengths)) else: file_paths.append(file_path) return file_paths",
"date of AIA files, in form YYYY-MM-DD') parser.add_argument('--end_date', '-e', default = datetime.utcnow().strftime('%Y-%m-%d'), help",
"are missing, we just won't have HMI stats for the CH HMI_images =",
"to the get_CH_map program get_CH_map_exec = '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x' # Path to the get_CH_map program",
"def get_HMI_files(date): '''Return a list of HMI files for the specified date''' file_path",
"quality of the file if ignore_bits is None: quality = get_quality(file_path) else: quality",
"given, we assume none are if untracked_maps is None: untracked_maps = list() else:",
"YYYY-MM-DD') parser.add_argument('--end_date', '-e', default = datetime.utcnow().strftime('%Y-%m-%d'), help = 'End date of AIA files,",
"found' % date) else: return [file_path] def create_segmented_map(AIA_images, date): '''Run the classification program'''",
"'tw') as f: f.write(output) except Exception as why: logging.error('Could not write tracking color",
"from job import Job, JobError from AIA_quality import get_quality, get_quality_errors # Path to",
"'/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt' # The minimum number of files that overlaps with the previous tracking",
"date += step def parse_tracking_color_file(tracking_color_file): try: with open(tracking_color_file, 'tr') as f: text =",
"paths of previously tracked CH maps') parser.add_argument('--untracked_maps', '-u', metavar = 'MAP', nargs='*', help",
"'File paths of not yet tracked CH maps') args = parser.parse_args() # Setup",
"the get_CH_map program''' # File path for the CH map to create CH_map",
"frequency to run the classification program classification_run_frequency = timedelta(hours = 4) # Path",
"+ '.SegmentedMap.fits') # Create a job for the classification program with the appropriate",
"program config file classification_config_file = '/opt/spoca4rwc/scripts/AIA_CH_classification.config' # Path to the centers file classification_centers_file",
"minimum number of files that overlaps with the previous tracking (see maxDeltaT) tracking_overlap",
"file {segmented_map}', segmented_map = segmented_map) else: logging.debug('Job ran without errors, output: %s', output)",
"specified date''' file_path = get_good_file(hmi_file_pattern.format(date=date)) if file_path is None: raise FileNotFoundError('HMI file for",
"in untracked_maps: tracked_maps = list(filter(lambda tracked_map: not os.path.samefile(tracked_map, untracked_map), tracked_maps)) # We will",
"CH map CH_map = create_CH_map(segmented_map, date, AIA_images + HMI_images) # Add the CH",
"given, we assumed all existing are if tracked_maps is None: tracked_maps = sorted(glob(os.path.join(maps_directory,",
"get_CH_map program get_CH_map_exec = '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x' # Path to the get_CH_map program config file",
"range for date''' date = start.replace() while date < end: yield date date",
"= CH_map) logging.info('Running job\\n%s', job) # Run the get_CH_map job return_code, output, error",
"newly_tracked_maps + untracked_maps def run_spoca_jobs(start_date, end_date, tracked_maps = None, untracked_maps = None): '''Run",
"no tracked maps were given, we assumed all existing are if tracked_maps is",
"located aia_file_pattern = '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits' # Directory where the prepped HMI files are located",
"end_date, classification_run_frequency): # Get the AIA files for the classification try: AIA_images =",
"get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength)) if file_path is None: raise FileNotFoundError('AIA file for date %s and",
"a good quality''' for file_path in sorted(glob(file_pattern)): # Get the quality of the",
"the CH map to create CH_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.CHMap.fits') # Create",
"map to create CH_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.CHMap.fits') # Create a job",
"untracked_map), tracked_maps)) # We will return the list of all newly tracked maps",
"%s', tracking_color_file, why) return tracked_maps + untracked_maps, [], newly_tracked_maps + untracked_maps def run_spoca_jobs(start_date,",
"segmented_map def create_CH_map(segmented_map, date, images): '''Run the get_CH_map program''' # File path for",
"'''Return a list of HMI files for the specified date''' file_path = get_good_file(hmi_file_pattern.format(date=date))",
"# Run the get_CH_map job return_code, output, error = job() # Check if",
"= 'File paths of previously tracked CH maps') parser.add_argument('--untracked_maps', '-u', metavar = 'MAP',",
"if file_path is None: raise FileNotFoundError('HMI file for date %s was not found'",
"tracking_run_count: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) else: logging.debug('Not enough maps to",
"from datetime import datetime, timedelta from job import Job, JobError from AIA_quality import",
"AIA files, in form YYYY-MM-DD') parser.add_argument('--tracked_maps', '-t', metavar = 'MAP', nargs='*', help =",
"logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s', filename=args.log_file) else: logging.basicConfig(level = logging.DEBUG if args.debug",
"on tracking_run_count = 3 # Directory to output the maps maps_directory = '/data/spoca/spoca4rwc/quicklook/CH_maps/'",
"and wavelengths''' file_paths = list() for wavelength in wavelengths: file_path = get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength))",
"all existing are if tracked_maps is None: tracked_maps = sorted(glob(os.path.join(maps_directory, '*.CHMap.fits'))) # If",
"the prepped HMI files are located hmi_file_pattern = '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits' # Wavelengths of AIA",
"date.strftime('%Y%m%d_%H%M%S') + '.SegmentedMap.fits') # Create a job for the classification program with the",
"a job for the tracking program with the appropriate parameters job = Job(tracking_exec,",
"job\\n%s', job) # Run the tracking job return_code, output, error = job() #",
"logging.warning('Missing AIA files for date %s, skipping missing files!', date) continue # Get",
"'/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config' # Path to the tracking program tracking_exec = '/opt/spoca4rwc/SPoCA/bin/tracking.x' # Path to",
"= '/opt/spoca4rwc/SPoCA/bin/tracking.x' # Path to the tracking program config file tracking_config_file = '/opt/spoca4rwc/scripts/AIA_CH_tracking.config'",
"means no defect if quality == 0: return file_path else: logging.info('Skipping file %s",
"the file if ignore_bits is None: quality = get_quality(file_path) else: quality = get_quality(file_path,",
"succesfully if return_code != 0: raise JobError(return_code, output, error, job_name = 'tracking', maps",
"parse_tracking_color_file(tracking_color_file): try: with open(tracking_color_file, 'tr') as f: text = f.readline() last_color = int(text.split(':')[1])",
"!= 0: raise JobError(return_code, output, error, job_name = 'tracking', maps = maps) else:",
"why) return 0 else: logging.debug('Found last color %s from file %s', last_color, tracking_color_file)",
"len(untracked_maps) >= tracking_run_count: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) else: logging.debug('Not enough",
"= int(text.split(':')[1]) except Exception as why: logging.warning('Could not read tracking color from file",
"= 'Start date of AIA files, in form YYYY-MM-DD') parser.add_argument('--end_date', '-e', default =",
"get_quality(file_path, ignore_bits) # A quality of 0 means no defect if quality ==",
"paths of not yet tracked CH maps') args = parser.parse_args() # Setup the",
"else: logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s')",
"Parse the start and end date start_date = datetime.strptime(args.start_date, '%Y-%m-%d') end_date = datetime.strptime(args.end_date,",
"untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) else: logging.debug('Not enough maps to run tracking,",
"to the list of untracked maps untracked_maps.append(CH_map) # If we have enough untracked",
"job_name = 'tracking', maps = maps) else: logging.debug('Job ran without errors, output: %s',",
"logging level to debug') parser.add_argument('--log_file', '-l', help = 'The file path of the",
"untracked maps, we run the tracking program if len(untracked_maps) >= tracking_run_count: tracked_maps, untracked_maps,",
"the tracking program config file tracking_config_file = '/opt/spoca4rwc/scripts/AIA_CH_tracking.config' # Path to the tracking",
"get_HMI_files(date): '''Return a list of HMI files for the specified date''' file_path =",
"program get_CH_map_exec = '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x' # Path to the get_CH_map program config file get_CH_map_config_file",
"job) # Run the classification job return_code, output, error = job() # Check",
"# Parse the start and end date start_date = datetime.strptime(args.start_date, '%Y-%m-%d') end_date =",
"SPoCA jobs to create and track the CHMaps''' # If no tracked maps",
"Directory where the prepped HMI files are located hmi_file_pattern = '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits' # Wavelengths",
"output: %s', output) try: with open(tracking_color_file, 'tw') as f: f.write(output) except Exception as",
"last color %s from file %s', last_color, tracking_color_file) return last_color def get_good_file(file_pattern, ignore_bits",
"CH_map) else: logging.debug('Job ran without errors, output: %s', output) return CH_map def track_maps(tracked_maps,",
"we run the tracking program if len(untracked_maps) >= tracking_run_count: tracked_maps, untracked_maps, newly_tracked_maps =",
"run the tracking program if len(untracked_maps) >= tracking_run_count: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps,",
"config = classification_config_file, centersFile = classification_centers_file, output = segmented_map) logging.info('Running job\\n%s', job) #",
"None): '''Return the first file that matches the file_pattern and has a good",
"if tracked_maps is None: tracked_maps = sorted(glob(os.path.join(maps_directory, '*.CHMap.fits'))) # If no untracked maps",
"Get the arguments parser = argparse.ArgumentParser(description = 'Create and track CH maps') parser.add_argument('--debug',",
"yet tracked CH maps') args = parser.parse_args() # Setup the logging if args.log_file:",
"Get the quality of the file if ignore_bits is None: quality = get_quality(file_path)",
"%s', last_color, tracking_color_file) return last_color def get_good_file(file_pattern, ignore_bits = None): '''Return the first",
"without errors, output: %s', output) return segmented_map def create_CH_map(segmented_map, date, images): '''Run the",
"'End date of AIA files, in form YYYY-MM-DD') parser.add_argument('--tracked_maps', '-t', metavar = 'MAP',",
"untracked maps from the tracked maps for untracked_map in untracked_maps: tracked_maps = list(filter(lambda",
"Run the SPoCA jobs try: CH_maps = run_spoca_jobs(start_date, end_date, args.tracked_maps, args.untracked_maps) except Exception",
"is None: raise FileNotFoundError('AIA file for date %s and wavelength %s was not",
"'.CHMap.fits') # Create a job for the get_CH_map program with the appropriate parameters",
"logging.debug('Found last color %s from file %s', last_color, tracking_color_file) return last_color def get_good_file(file_pattern,",
"%(levelname)-8s : %(message)s') # Parse the start and end date start_date = datetime.strptime(args.start_date,",
"* classification_run_frequency).total_seconds(), newColor = last_color) logging.info('Running job\\n%s', job) # Run the tracking job",
"ignore_bits is None: quality = get_quality(file_path) else: quality = get_quality(file_path, ignore_bits) # A",
"file_path, get_quality_errors(quality)) def get_AIA_files(date, wavelengths): '''Return a list of AIA files for the",
"from the tracked maps for untracked_map in untracked_maps: tracked_maps = list(filter(lambda tracked_map: not",
"Create the CH map CH_map = create_CH_map(segmented_map, date, AIA_images + HMI_images) # Add",
"untracked_map in untracked_maps: tracked_maps = list(filter(lambda tracked_map: not os.path.samefile(tracked_map, untracked_map), tracked_maps)) # We",
"if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s', filename=args.log_file) else: logging.basicConfig(level =",
"file {CH_map}', CH_map = CH_map) else: logging.debug('Job ran without errors, output: %s', output)",
"appropriate parameters job = Job(classification_exec, *AIA_images, config = classification_config_file, centersFile = classification_centers_file, output",
"get_CH_map program''' # File path for the CH map to create CH_map =",
"ran without errors, output: %s', output) try: with open(tracking_color_file, 'tw') as f: f.write(output)",
"in date_range(start_date, end_date, classification_run_frequency): # Get the AIA files for the classification try:",
"wavelengths): '''Return a list of AIA files for the specified date and wavelengths'''",
"paths of the maps to run the tracking on maps = tracked_maps[-tracking_overlap:] +",
"file \"%s\": %s', tracking_color_file, why) return tracked_maps + untracked_maps, [], newly_tracked_maps + untracked_maps",
"'/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits' # Directory where the prepped HMI files are located hmi_file_pattern = '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits'",
"'''Equivalent to range for date''' date = start.replace() while date < end: yield",
"classification program''' # File path for the Segmented map to create segmented_map =",
"python3 import os import sys import logging import argparse from glob import glob",
"output) return CH_map def track_maps(tracked_maps, untracked_maps, newly_tracked_maps): '''Run the tracking program''' # File",
"= list() else: # We need to remove the untracked maps from the",
"config file get_CH_map_config_file = '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config' # Path to the tracking program tracking_exec =",
"# The minimum number of files that overlaps with the previous tracking (see",
"is None: raise FileNotFoundError('HMI file for date %s was not found' % date)",
"date) else: return [file_path] def create_segmented_map(AIA_images, date): '''Run the classification program''' # File",
"else: logging.info('Skipping file %s with bad quality: %s', file_path, get_quality_errors(quality)) def get_AIA_files(date, wavelengths):",
"else: quality = get_quality(file_path, ignore_bits) # A quality of 0 means no defect",
"AIA_images) elif not os.path.exists(segmented_map): raise JobError(message = 'Could not find output file {segmented_map}',",
"# If no tracked maps were given, we assumed all existing are if",
"tracked_map: not os.path.samefile(tracked_map, untracked_map), tracked_maps)) # We will return the list of all",
"= 'store_true', help = 'Set the logging level to debug') parser.add_argument('--log_file', '-l', help",
"classification_run_frequency).total_seconds(), newColor = last_color) logging.info('Running job\\n%s', job) # Run the tracking job return_code,",
"run_spoca_jobs(start_date, end_date, tracked_maps = None, untracked_maps = None): '''Run the SPoCA jobs to",
"list() for wavelength in wavelengths: file_path = get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength)) if file_path is None:",
"newly_tracked_maps): '''Run the tracking program''' # File paths of the maps to run",
"get_quality_errors # Path to the classification program classification_exec = '/opt/spoca4rwc/SPoCA/bin/classification.x' # Path to",
"with bad quality: %s', file_path, get_quality_errors(quality)) def get_AIA_files(date, wavelengths): '''Return a list of",
"job for the get_CH_map program with the appropriate parameters job = Job(get_CH_map_exec, segmented_map,",
"'''Run the tracking program''' # File paths of the maps to run the",
"are located aia_file_pattern = '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits' # Directory where the prepped HMI files are",
"classification_run_frequency = timedelta(hours = 4) # Path to the get_CH_map program get_CH_map_exec =",
"number of files that overlaps with the previous tracking (see maxDeltaT) tracking_overlap =",
"import sys import logging import argparse from glob import glob from datetime import",
"file %s', last_color, tracking_color_file) return last_color def get_good_file(file_pattern, ignore_bits = None): '''Return the",
"# The number of CH maps to run the tracking program on tracking_run_count",
">= tracking_run_count: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) else: logging.debug('Not enough maps",
"from file \"%s\": %s', tracking_color_file, why) return 0 else: logging.debug('Found last color %s",
"format='%(asctime)s : %(levelname)-8s : %(message)s') # Parse the start and end date start_date",
"tracking program config file tracking_config_file = '/opt/spoca4rwc/scripts/AIA_CH_tracking.config' # Path to the tracking color",
"try: with open(tracking_color_file, 'tr') as f: text = f.readline() last_color = int(text.split(':')[1]) except",
"%s, skipping missing files!', date) continue # Get the list of HMI images",
"try: AIA_images = get_AIA_files(date, AIA_wavelengths) except FileNotFoundError as why: logging.warning('Missing AIA files for",
"untracked_maps.append(CH_map) # If we have enough untracked maps, we run the tracking program",
"# File path for the Segmented map to create segmented_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S')",
"CH_map def track_maps(tracked_maps, untracked_maps, newly_tracked_maps): '''Run the tracking program''' # File paths of",
"= None): '''Run the SPoCA jobs to create and track the CHMaps''' #",
"help = 'Set the logging level to debug') parser.add_argument('--log_file', '-l', help = 'The",
"with the appropriate parameters job = Job(tracking_exec, *maps, config = tracking_config_file, maxDeltaT =",
"the list of HMI images try: HMI_images = get_HMI_files(date) except FileNotFoundError as why:",
"return file_path else: logging.info('Skipping file %s with bad quality: %s', file_path, get_quality_errors(quality)) def",
"program with the appropriate parameters job = Job(classification_exec, *AIA_images, config = classification_config_file, centersFile",
"logging.debug('Job ran without errors, output: %s', output) try: with open(tracking_color_file, 'tw') as f:",
"tracked_maps)) # We will return the list of all newly tracked maps newly_tracked_maps",
"stats for the CH HMI_images = list() # Create the Segmented map segmented_map",
"None: untracked_maps = list() else: # We need to remove the untracked maps",
"[193] def date_range(start, end, step): '''Equivalent to range for date''' date = start.replace()",
"get_CH_map program config file get_CH_map_config_file = '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config' # Path to the tracking program",
"wavelengths)) else: file_paths.append(file_path) return file_paths def get_HMI_files(date): '''Return a list of HMI files",
"none are if untracked_maps is None: untracked_maps = list() else: # We need",
"[], newly_tracked_maps + untracked_maps def run_spoca_jobs(start_date, end_date, tracked_maps = None, untracked_maps = None):",
"parse_tracking_color_file(tracking_color_file) # Create a job for the tracking program with the appropriate parameters",
"not os.path.samefile(tracked_map, untracked_map), tracked_maps)) # We will return the list of all newly",
"if args.end_date else datetime.utcnow() # Run the SPoCA jobs try: CH_maps = run_spoca_jobs(start_date,",
"If no tracked maps were given, we assumed all existing are if tracked_maps",
"CH maps') args = parser.parse_args() # Setup the logging if args.log_file: logging.basicConfig(level =",
"'''Return a list of AIA files for the specified date and wavelengths''' file_paths",
"config file classification_config_file = '/opt/spoca4rwc/scripts/AIA_CH_classification.config' # Path to the centers file classification_centers_file =",
"logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s') # Parse the start and end date",
"need %s but have only %s', tracking_run_count, len(untracked_maps)) # Track the remaing untracked",
"to create and track the CHMaps''' # If no tracked maps were given,",
"of HMI images try: HMI_images = get_HMI_files(date) except FileNotFoundError as why: # It's",
"= '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x' # Path to the get_CH_map program config file get_CH_map_config_file = '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config'",
"Track the remaing untracked maps if untracked_maps: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps,",
"'Set the logging level to debug') parser.add_argument('--log_file', '-l', help = 'The file path",
"The minimum number of files that overlaps with the previous tracking (see maxDeltaT)",
"end date start_date = datetime.strptime(args.start_date, '%Y-%m-%d') end_date = datetime.strptime(args.end_date, '%Y-%m-%d') if args.end_date else",
"for date in date_range(start_date, end_date, classification_run_frequency): # Get the AIA files for the",
"date, AIA_images + HMI_images) # Add the CH map to the list of",
"untracked_maps def run_spoca_jobs(start_date, end_date, tracked_maps = None, untracked_maps = None): '''Run the SPoCA",
"to the tracking program config file tracking_config_file = '/opt/spoca4rwc/scripts/AIA_CH_tracking.config' # Path to the",
"the CHMaps''' # If no tracked maps were given, we assumed all existing",
"if untracked_maps: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) return newly_tracked_maps # Start",
"'/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits' # Wavelengths of AIA data to run the classification program on AIA_wavelengths",
"*maps, config = tracking_config_file, maxDeltaT = (tracking_overlap * classification_run_frequency).total_seconds(), newColor = last_color) logging.info('Running",
"maps to run the tracking program on tracking_run_count = 3 # Directory to",
"previous tracking (see maxDeltaT) tracking_overlap = 6 # The number of CH maps",
"# If no untracked maps were given, we assume none are if untracked_maps",
"form YYYY-MM-DD') parser.add_argument('--tracked_maps', '-t', metavar = 'MAP', nargs='*', help = 'File paths of",
"files for the specified date''' file_path = get_good_file(hmi_file_pattern.format(date=date)) if file_path is None: raise",
"output, error, job_name = 'classification', AIA_images = AIA_images) elif not os.path.exists(segmented_map): raise JobError(message",
"CH map to create CH_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.CHMap.fits') # Create a",
"date in date_range(start_date, end_date, classification_run_frequency): # Get the AIA files for the classification",
"= classification_centers_file, output = segmented_map) logging.info('Running job\\n%s', job) # Run the classification job",
"FileNotFoundError as why: logging.warning('Missing AIA files for date %s, skipping missing files!', date)",
"# A quality of 0 means no defect if quality == 0: return",
"= 'MAP', nargs='*', help = 'File paths of not yet tracked CH maps')",
"to run the tracking program on tracking_run_count = 3 # Directory to output",
"default = '2010-05-20', help = 'Start date of AIA files, in form YYYY-MM-DD')",
"the tracking color file tracking_color_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt' # The minimum number of files",
"job = Job(classification_exec, *AIA_images, config = classification_config_file, centersFile = classification_centers_file, output = segmented_map)",
"= job() # Check if the job ran succesfully if return_code != 0:",
"maps newly_tracked_maps = list() # Start the loop for date in date_range(start_date, end_date,",
"the specified date and wavelengths''' file_paths = list() for wavelength in wavelengths: file_path",
"def create_segmented_map(AIA_images, date): '''Run the classification program''' # File path for the Segmented",
"tracked maps were given, we assumed all existing are if tracked_maps is None:",
"untracked_maps = list() else: # We need to remove the untracked maps from",
"If we have enough untracked maps, we run the tracking program if len(untracked_maps)",
"= 'Create and track CH maps') parser.add_argument('--debug', '-d', default = False, action =",
"# last color of the previous tracking last_color = parse_tracking_color_file(tracking_color_file) # Create a",
"# Check if the job ran succesfully if return_code != 0: raise JobError(return_code,",
"= timedelta(hours = 4) # Path to the get_CH_map program get_CH_map_exec = '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x'",
"raise JobError(message = 'Could not find output file {segmented_map}', segmented_map = segmented_map) else:",
"= 'get_CH_map', segmented_map = segmented_map) elif not os.path.exists(CH_map): raise JobError(message = 'Could not",
"maps) else: logging.debug('Job ran without errors, output: %s', output) try: with open(tracking_color_file, 'tw')",
"f.write(output) except Exception as why: logging.error('Could not write tracking color to file \"%s\":",
"Exception as why: logging.warning('Could not read tracking color from file \"%s\": %s', tracking_color_file,",
"It's okay if HMI files are missing, we just won't have HMI stats",
"tracking program tracking_exec = '/opt/spoca4rwc/SPoCA/bin/tracking.x' # Path to the tracking program config file",
"get_CH_map_config_file = '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config' # Path to the tracking program tracking_exec = '/opt/spoca4rwc/SPoCA/bin/tracking.x' #",
"end, step): '''Equivalent to range for date''' date = start.replace() while date <",
"= last_color) logging.info('Running job\\n%s', job) # Run the tracking job return_code, output, error",
"newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) else: logging.debug('Not enough maps to run tracking, need",
"only %s', tracking_run_count, len(untracked_maps)) # Track the remaing untracked maps if untracked_maps: tracked_maps,",
"not yet tracked CH maps') args = parser.parse_args() # Setup the logging if",
"%s from file %s', last_color, tracking_color_file) return last_color def get_good_file(file_pattern, ignore_bits = None):",
"parameters job = Job(classification_exec, *AIA_images, config = classification_config_file, centersFile = classification_centers_file, output =",
"file_path in sorted(glob(file_pattern)): # Get the quality of the file if ignore_bits is",
"with open(tracking_color_file, 'tr') as f: text = f.readline() last_color = int(text.split(':')[1]) except Exception",
"CH maps') parser.add_argument('--untracked_maps', '-u', metavar = 'MAP', nargs='*', help = 'File paths of",
"= os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.CHMap.fits') # Create a job for the get_CH_map program",
"were given, we assumed all existing are if tracked_maps is None: tracked_maps =",
"else datetime.utcnow() # Run the SPoCA jobs try: CH_maps = run_spoca_jobs(start_date, end_date, args.tracked_maps,",
"filename=args.log_file) else: logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s :",
"first file that matches the file_pattern and has a good quality''' for file_path",
"will return the list of all newly tracked maps newly_tracked_maps = list() #",
"create segmented_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.SegmentedMap.fits') # Create a job for the",
"except FileNotFoundError as why: logging.warning('Missing AIA files for date %s, skipping missing files!',",
"for the get_CH_map program with the appropriate parameters job = Job(get_CH_map_exec, segmented_map, *images,",
"try: HMI_images = get_HMI_files(date) except FileNotFoundError as why: # It's okay if HMI",
"start.replace() while date < end: yield date date += step def parse_tracking_color_file(tracking_color_file): try:",
"Job(classification_exec, *AIA_images, config = classification_config_file, centersFile = classification_centers_file, output = segmented_map) logging.info('Running job\\n%s',",
"appropriate parameters job = Job(get_CH_map_exec, segmented_map, *images, config = get_CH_map_config_file, output = CH_map)",
"segmented_map) elif not os.path.exists(CH_map): raise JobError(message = 'Could not find output file {CH_map}',",
"of the log file') parser.add_argument('--start_date', '-s', default = '2010-05-20', help = 'Start date",
"= f.readline() last_color = int(text.split(':')[1]) except Exception as why: logging.warning('Could not read tracking",
"maps') args = parser.parse_args() # Setup the logging if args.log_file: logging.basicConfig(level = logging.DEBUG",
"= None, untracked_maps = None): '''Run the SPoCA jobs to create and track",
"the tracking program if len(untracked_maps) >= tracking_run_count: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps,",
"get_quality, get_quality_errors # Path to the classification program classification_exec = '/opt/spoca4rwc/SPoCA/bin/classification.x' # Path",
"Path to the centers file classification_centers_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt' # The frequency to run",
"we assume none are if untracked_maps is None: untracked_maps = list() else: #",
"'File paths of previously tracked CH maps') parser.add_argument('--untracked_maps', '-u', metavar = 'MAP', nargs='*',",
"= datetime.strptime(args.end_date, '%Y-%m-%d') if args.end_date else datetime.utcnow() # Run the SPoCA jobs try:",
"def track_maps(tracked_maps, untracked_maps, newly_tracked_maps): '''Run the tracking program''' # File paths of the",
"why: logging.error('Could not write tracking color to file \"%s\": %s', tracking_color_file, why) return",
"the classification program classification_run_frequency = timedelta(hours = 4) # Path to the get_CH_map",
"to the tracking program tracking_exec = '/opt/spoca4rwc/SPoCA/bin/tracking.x' # Path to the tracking program",
"'''Run the SPoCA jobs to create and track the CHMaps''' # If no",
"os.path.samefile(tracked_map, untracked_map), tracked_maps)) # We will return the list of all newly tracked",
"hmi_file_pattern = '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits' # Wavelengths of AIA data to run the classification program",
"classification program on AIA_wavelengths = [193] def date_range(start, end, step): '''Equivalent to range",
"tracking_overlap = 6 # The number of CH maps to run the tracking",
"4) # Path to the get_CH_map program get_CH_map_exec = '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x' # Path to",
"# File paths of the maps to run the tracking on maps =",
"date date += step def parse_tracking_color_file(tracking_color_file): try: with open(tracking_color_file, 'tr') as f: text",
"maxDeltaT) tracking_overlap = 6 # The number of CH maps to run the",
"Start the loop for date in date_range(start_date, end_date, classification_run_frequency): # Get the AIA",
"CH_map = create_CH_map(segmented_map, date, AIA_images + HMI_images) # Add the CH map to",
"the list of untracked maps untracked_maps.append(CH_map) # If we have enough untracked maps,",
"len(untracked_maps)) # Track the remaing untracked maps if untracked_maps: tracked_maps, untracked_maps, newly_tracked_maps =",
"except Exception as why: logging.error('Could not write tracking color to file \"%s\": %s',",
"files are located hmi_file_pattern = '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits' # Wavelengths of AIA data to run",
"with open(tracking_color_file, 'tw') as f: f.write(output) except Exception as why: logging.error('Could not write",
"to the get_CH_map program config file get_CH_map_config_file = '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config' # Path to the",
"output, error, job_name = 'tracking', maps = maps) else: logging.debug('Job ran without errors,",
"import get_quality, get_quality_errors # Path to the classification program classification_exec = '/opt/spoca4rwc/SPoCA/bin/classification.x' #",
"not read tracking color from file \"%s\": %s', tracking_color_file, why) return 0 else:",
"<gh_stars>0 #!/usr/bin/env python3 import os import sys import logging import argparse from glob",
"files for the classification try: AIA_images = get_AIA_files(date, AIA_wavelengths) except FileNotFoundError as why:",
"# Track the remaing untracked maps if untracked_maps: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps,",
"# The frequency to run the classification program classification_run_frequency = timedelta(hours = 4)",
"file %s with bad quality: %s', file_path, get_quality_errors(quality)) def get_AIA_files(date, wavelengths): '''Return a",
"classification job return_code, output, error = job() # Check if the job ran",
"'/opt/spoca4rwc/scripts/AIA_CH_classification.config' # Path to the centers file classification_centers_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt' # The frequency",
"*images, config = get_CH_map_config_file, output = CH_map) logging.info('Running job\\n%s', job) # Run the",
"'/opt/spoca4rwc/SPoCA/bin/classification.x' # Path to the classification program config file classification_config_file = '/opt/spoca4rwc/scripts/AIA_CH_classification.config' #",
"job) # Run the get_CH_map job return_code, output, error = job() # Check",
"in sorted(glob(file_pattern)): # Get the quality of the file if ignore_bits is None:",
"None: tracked_maps = sorted(glob(os.path.join(maps_directory, '*.CHMap.fits'))) # If no untracked maps were given, we",
"FileNotFoundError('AIA file for date %s and wavelength %s was not found' % (date,",
"= [193] def date_range(start, end, step): '''Equivalent to range for date''' date =",
"to output the maps maps_directory = '/data/spoca/spoca4rwc/quicklook/CH_maps/' # Directory where the prepped AIA",
"job_name = 'get_CH_map', segmented_map = segmented_map) elif not os.path.exists(CH_map): raise JobError(message = 'Could",
"of CH maps to run the tracking program on tracking_run_count = 3 #",
"'-u', metavar = 'MAP', nargs='*', help = 'File paths of not yet tracked",
"located hmi_file_pattern = '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits' # Wavelengths of AIA data to run the classification",
"of 0 means no defect if quality == 0: return file_path else: logging.info('Skipping",
"datetime.strptime(args.start_date, '%Y-%m-%d') end_date = datetime.strptime(args.end_date, '%Y-%m-%d') if args.end_date else datetime.utcnow() # Run the",
"YYYY-MM-DD') parser.add_argument('--tracked_maps', '-t', metavar = 'MAP', nargs='*', help = 'File paths of previously",
"tracked_maps + untracked_maps, [], newly_tracked_maps + untracked_maps def run_spoca_jobs(start_date, end_date, tracked_maps = None,",
"def create_CH_map(segmented_map, date, images): '''Run the get_CH_map program''' # File path for the",
"tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) else: logging.debug('Not enough maps to run",
"= get_quality(file_path) else: quality = get_quality(file_path, ignore_bits) # A quality of 0 means",
"in wavelengths: file_path = get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength)) if file_path is None: raise FileNotFoundError('AIA file",
"for date %s, skipping missing files!', date) continue # Get the list of",
"to run the classification program on AIA_wavelengths = [193] def date_range(start, end, step):",
"not os.path.exists(segmented_map): raise JobError(message = 'Could not find output file {segmented_map}', segmented_map =",
"the tracking program''' # File paths of the maps to run the tracking",
"= list(filter(lambda tracked_map: not os.path.samefile(tracked_map, untracked_map), tracked_maps)) # We will return the list",
"tracking_run_count, len(untracked_maps)) # Track the remaing untracked maps if untracked_maps: tracked_maps, untracked_maps, newly_tracked_maps",
"= datetime.utcnow().strftime('%Y-%m-%d'), help = 'End date of AIA files, in form YYYY-MM-DD') parser.add_argument('--tracked_maps',",
": %(message)s', filename=args.log_file) else: logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s :",
"= Job(tracking_exec, *maps, config = tracking_config_file, maxDeltaT = (tracking_overlap * classification_run_frequency).total_seconds(), newColor =",
"the classification job return_code, output, error = job() # Check if the job",
"\"%s\": %s', tracking_color_file, why) return tracked_maps + untracked_maps, [], newly_tracked_maps + untracked_maps def",
"segmented_map = segmented_map) else: logging.debug('Job ran without errors, output: %s', output) return segmented_map",
"= CH_map) else: logging.debug('Job ran without errors, output: %s', output) return CH_map def",
"'-d', default = False, action = 'store_true', help = 'Set the logging level",
"are located hmi_file_pattern = '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits' # Wavelengths of AIA data to run the",
"untracked_maps: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) return newly_tracked_maps # Start point",
"HMI_images) # Add the CH map to the list of untracked maps untracked_maps.append(CH_map)",
"= 'End date of AIA files, in form YYYY-MM-DD') parser.add_argument('--tracked_maps', '-t', metavar =",
"of AIA files for the specified date and wavelengths''' file_paths = list() for",
"segmented_map, *images, config = get_CH_map_config_file, output = CH_map) logging.info('Running job\\n%s', job) # Run",
"maps were given, we assumed all existing are if tracked_maps is None: tracked_maps",
"the remaing untracked maps if untracked_maps: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps)",
"= 'tracking', maps = maps) else: logging.debug('Job ran without errors, output: %s', output)",
"== '__main__': # Get the arguments parser = argparse.ArgumentParser(description = 'Create and track",
"Get the list of HMI images try: HMI_images = get_HMI_files(date) except FileNotFoundError as",
"to file \"%s\": %s', tracking_color_file, why) return tracked_maps + untracked_maps, [], newly_tracked_maps +",
"files, in form YYYY-MM-DD') parser.add_argument('--tracked_maps', '-t', metavar = 'MAP', nargs='*', help = 'File",
"files!', date) continue # Get the list of HMI images try: HMI_images =",
"the appropriate parameters job = Job(get_CH_map_exec, segmented_map, *images, config = get_CH_map_config_file, output =",
"%s', output) try: with open(tracking_color_file, 'tw') as f: f.write(output) except Exception as why:",
"'get_CH_map', segmented_map = segmented_map) elif not os.path.exists(CH_map): raise JobError(message = 'Could not find",
"# Get the list of HMI images try: HMI_images = get_HMI_files(date) except FileNotFoundError",
"of AIA files, in form YYYY-MM-DD') parser.add_argument('--tracked_maps', '-t', metavar = 'MAP', nargs='*', help",
"(see maxDeltaT) tracking_overlap = 6 # The number of CH maps to run",
"a job for the classification program with the appropriate parameters job = Job(classification_exec,",
"help = 'The file path of the log file') parser.add_argument('--start_date', '-s', default =",
"maps to run the tracking on maps = tracked_maps[-tracking_overlap:] + untracked_maps # last",
"parser.add_argument('--end_date', '-e', default = datetime.utcnow().strftime('%Y-%m-%d'), help = 'End date of AIA files, in",
"raise FileNotFoundError('AIA file for date %s and wavelength %s was not found' %",
"# Create a job for the tracking program with the appropriate parameters job",
"as why: logging.error('Could not write tracking color to file \"%s\": %s', tracking_color_file, why)",
"the appropriate parameters job = Job(tracking_exec, *maps, config = tracking_config_file, maxDeltaT = (tracking_overlap",
"newly_tracked_maps) return newly_tracked_maps # Start point of the script if __name__ == '__main__':",
"previous tracking last_color = parse_tracking_color_file(tracking_color_file) # Create a job for the tracking program",
"of AIA files, in form YYYY-MM-DD') parser.add_argument('--end_date', '-e', default = datetime.utcnow().strftime('%Y-%m-%d'), help =",
"+ '.CHMap.fits') # Create a job for the get_CH_map program with the appropriate",
"parameters job = Job(get_CH_map_exec, segmented_map, *images, config = get_CH_map_config_file, output = CH_map) logging.info('Running",
"logging import argparse from glob import glob from datetime import datetime, timedelta from",
"Run the tracking job return_code, output, error = job() # Check if the",
"date) continue # Get the list of HMI images try: HMI_images = get_HMI_files(date)",
"track_maps(tracked_maps, untracked_maps, newly_tracked_maps) return newly_tracked_maps # Start point of the script if __name__",
"HMI files for the specified date''' file_path = get_good_file(hmi_file_pattern.format(date=date)) if file_path is None:",
"is None: tracked_maps = sorted(glob(os.path.join(maps_directory, '*.CHMap.fits'))) # If no untracked maps were given,",
"of the maps to run the tracking on maps = tracked_maps[-tracking_overlap:] + untracked_maps",
"tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) return newly_tracked_maps # Start point of",
"CH maps') parser.add_argument('--debug', '-d', default = False, action = 'store_true', help = 'Set",
"no untracked maps were given, we assume none are if untracked_maps is None:",
"all newly tracked maps newly_tracked_maps = list() # Start the loop for date",
"tracked_maps is None: tracked_maps = sorted(glob(os.path.join(maps_directory, '*.CHMap.fits'))) # If no untracked maps were",
"the CH HMI_images = list() # Create the Segmented map segmented_map = create_segmented_map(AIA_images,",
"last_color) logging.info('Running job\\n%s', job) # Run the tracking job return_code, output, error =",
"CHMaps''' # If no tracked maps were given, we assumed all existing are",
"'''Run the classification program''' # File path for the Segmented map to create",
"# Path to the tracking program config file tracking_config_file = '/opt/spoca4rwc/scripts/AIA_CH_tracking.config' # Path",
"= segmented_map) logging.info('Running job\\n%s', job) # Run the classification job return_code, output, error",
"= segmented_map) else: logging.debug('Job ran without errors, output: %s', output) return segmented_map def",
"of all newly tracked maps newly_tracked_maps = list() # Start the loop for",
"list of HMI images try: HMI_images = get_HMI_files(date) except FileNotFoundError as why: #",
"= None): '''Return the first file that matches the file_pattern and has a",
"newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) return newly_tracked_maps # Start point of the script",
": %(levelname)-8s : %(message)s', filename=args.log_file) else: logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO,",
"was not found' % date) else: return [file_path] def create_segmented_map(AIA_images, date): '''Run the",
"file tracking_color_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt' # The minimum number of files that overlaps with",
": %(message)s') # Parse the start and end date start_date = datetime.strptime(args.start_date, '%Y-%m-%d')",
"'/opt/spoca4rwc/scripts/AIA_CH_tracking.config' # Path to the tracking color file tracking_color_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt' # The",
"job = Job(get_CH_map_exec, segmented_map, *images, config = get_CH_map_config_file, output = CH_map) logging.info('Running job\\n%s',",
"logging.info('Running job\\n%s', job) # Run the tracking job return_code, output, error = job()",
"classification program classification_run_frequency = timedelta(hours = 4) # Path to the get_CH_map program",
"= '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config' # Path to the tracking program tracking_exec = '/opt/spoca4rwc/SPoCA/bin/tracking.x' # Path",
"Job(tracking_exec, *maps, config = tracking_config_file, maxDeltaT = (tracking_overlap * classification_run_frequency).total_seconds(), newColor = last_color)",
"CH_map) logging.info('Running job\\n%s', job) # Run the get_CH_map job return_code, output, error =",
"are if tracked_maps is None: tracked_maps = sorted(glob(os.path.join(maps_directory, '*.CHMap.fits'))) # If no untracked",
"program on tracking_run_count = 3 # Directory to output the maps maps_directory =",
"0: return file_path else: logging.info('Skipping file %s with bad quality: %s', file_path, get_quality_errors(quality))",
"= 'Could not find output file {segmented_map}', segmented_map = segmented_map) else: logging.debug('Job ran",
"untracked_maps: tracked_maps = list(filter(lambda tracked_map: not os.path.samefile(tracked_map, untracked_map), tracked_maps)) # We will return",
"AIA_images = AIA_images) elif not os.path.exists(segmented_map): raise JobError(message = 'Could not find output",
"metavar = 'MAP', nargs='*', help = 'File paths of previously tracked CH maps')",
"#!/usr/bin/env python3 import os import sys import logging import argparse from glob import",
"and end date start_date = datetime.strptime(args.start_date, '%Y-%m-%d') end_date = datetime.strptime(args.end_date, '%Y-%m-%d') if args.end_date",
"FileNotFoundError('HMI file for date %s was not found' % date) else: return [file_path]",
"logging.info('Running job\\n%s', job) # Run the classification job return_code, output, error = job()",
"start and end date start_date = datetime.strptime(args.start_date, '%Y-%m-%d') end_date = datetime.strptime(args.end_date, '%Y-%m-%d') if",
"for the specified date''' file_path = get_good_file(hmi_file_pattern.format(date=date)) if file_path is None: raise FileNotFoundError('HMI",
"output: %s', output) return segmented_map def create_CH_map(segmented_map, date, images): '''Run the get_CH_map program'''",
"% date) else: return [file_path] def create_segmented_map(AIA_images, date): '''Run the classification program''' #",
"we have enough untracked maps, we run the tracking program if len(untracked_maps) >=",
"%(message)s', filename=args.log_file) else: logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s",
"classification try: AIA_images = get_AIA_files(date, AIA_wavelengths) except FileNotFoundError as why: logging.warning('Missing AIA files",
"# It's okay if HMI files are missing, we just won't have HMI",
"Exception as why: logging.error('Could not write tracking color to file \"%s\": %s', tracking_color_file,",
"0: raise JobError(return_code, output, error, job_name = 'tracking', maps = maps) else: logging.debug('Job",
"HMI stats for the CH HMI_images = list() # Create the Segmented map",
"'2010-05-20', help = 'Start date of AIA files, in form YYYY-MM-DD') parser.add_argument('--end_date', '-e',",
"def run_spoca_jobs(start_date, end_date, tracked_maps = None, untracked_maps = None): '''Run the SPoCA jobs",
"and track the CHMaps''' # If no tracked maps were given, we assumed",
"= 3 # Directory to output the maps maps_directory = '/data/spoca/spoca4rwc/quicklook/CH_maps/' # Directory",
"last color of the previous tracking last_color = parse_tracking_color_file(tracking_color_file) # Create a job",
"the quality of the file if ignore_bits is None: quality = get_quality(file_path) else:",
"without errors, output: %s', output) return CH_map def track_maps(tracked_maps, untracked_maps, newly_tracked_maps): '''Run the",
"AIA_wavelengths) except FileNotFoundError as why: logging.warning('Missing AIA files for date %s, skipping missing",
"= Job(get_CH_map_exec, segmented_map, *images, config = get_CH_map_config_file, output = CH_map) logging.info('Running job\\n%s', job)",
"= classification_config_file, centersFile = classification_centers_file, output = segmented_map) logging.info('Running job\\n%s', job) # Run",
"{CH_map}', CH_map = CH_map) else: logging.debug('Job ran without errors, output: %s', output) return",
"f.readline() last_color = int(text.split(':')[1]) except Exception as why: logging.warning('Could not read tracking color",
"CH map to the list of untracked maps untracked_maps.append(CH_map) # If we have",
"to run tracking, need %s but have only %s', tracking_run_count, len(untracked_maps)) # Track",
"segmented_map) else: logging.debug('Job ran without errors, output: %s', output) return segmented_map def create_CH_map(segmented_map,",
"CH_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.CHMap.fits') # Create a job for the get_CH_map",
"= '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt' # The minimum number of files that overlaps with the previous",
"for date %s and wavelength %s was not found' % (date, wavelengths)) else:",
"return_code != 0: raise JobError(return_code, output, error, job_name = 'tracking', maps = maps)",
"the previous tracking (see maxDeltaT) tracking_overlap = 6 # The number of CH",
"track_maps(tracked_maps, untracked_maps, newly_tracked_maps) else: logging.debug('Not enough maps to run tracking, need %s but",
"< end: yield date date += step def parse_tracking_color_file(tracking_color_file): try: with open(tracking_color_file, 'tr')",
"import datetime, timedelta from job import Job, JobError from AIA_quality import get_quality, get_quality_errors",
"= get_quality(file_path, ignore_bits) # A quality of 0 means no defect if quality",
"parser = argparse.ArgumentParser(description = 'Create and track CH maps') parser.add_argument('--debug', '-d', default =",
"log file') parser.add_argument('--start_date', '-s', default = '2010-05-20', help = 'Start date of AIA",
"# Get the quality of the file if ignore_bits is None: quality =",
"path for the CH map to create CH_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.CHMap.fits')",
"list of untracked maps untracked_maps.append(CH_map) # If we have enough untracked maps, we",
"color %s from file %s', last_color, tracking_color_file) return last_color def get_good_file(file_pattern, ignore_bits =",
"ignore_bits = None): '''Return the first file that matches the file_pattern and has",
"get_quality_errors(quality)) def get_AIA_files(date, wavelengths): '''Return a list of AIA files for the specified",
"file_paths = list() for wavelength in wavelengths: file_path = get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength)) if file_path",
"not found' % (date, wavelengths)) else: file_paths.append(file_path) return file_paths def get_HMI_files(date): '''Return a",
"tracked maps for untracked_map in untracked_maps: tracked_maps = list(filter(lambda tracked_map: not os.path.samefile(tracked_map, untracked_map),",
"file get_CH_map_config_file = '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config' # Path to the tracking program tracking_exec = '/opt/spoca4rwc/SPoCA/bin/tracking.x'",
"import logging import argparse from glob import glob from datetime import datetime, timedelta",
"job import Job, JobError from AIA_quality import get_quality, get_quality_errors # Path to the",
"return segmented_map def create_CH_map(segmented_map, date, images): '''Run the get_CH_map program''' # File path",
"tracked CH maps') parser.add_argument('--untracked_maps', '-u', metavar = 'MAP', nargs='*', help = 'File paths",
"datetime import datetime, timedelta from job import Job, JobError from AIA_quality import get_quality,",
"= parser.parse_args() # Setup the logging if args.log_file: logging.basicConfig(level = logging.DEBUG if args.debug",
"format='%(asctime)s : %(levelname)-8s : %(message)s', filename=args.log_file) else: logging.basicConfig(level = logging.DEBUG if args.debug else",
"JobError(message = 'Could not find output file {CH_map}', CH_map = CH_map) else: logging.debug('Job",
"get_CH_map_config_file, output = CH_map) logging.info('Running job\\n%s', job) # Run the get_CH_map job return_code,",
"get_HMI_files(date) except FileNotFoundError as why: # It's okay if HMI files are missing,",
"is None: quality = get_quality(file_path) else: quality = get_quality(file_path, ignore_bits) # A quality",
"existing are if tracked_maps is None: tracked_maps = sorted(glob(os.path.join(maps_directory, '*.CHMap.fits'))) # If no",
"= logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s', filename=args.log_file) else:",
"has a good quality''' for file_path in sorted(glob(file_pattern)): # Get the quality of",
"# If we have enough untracked maps, we run the tracking program if",
"the job ran succesfully if return_code != 0: raise JobError(return_code, output, error, job_name",
"segmented_map = create_segmented_map(AIA_images, date) # Create the CH map CH_map = create_CH_map(segmented_map, date,",
"get_quality(file_path) else: quality = get_quality(file_path, ignore_bits) # A quality of 0 means no",
"succesfully if return_code != 0: raise JobError(return_code, output, error, job_name = 'classification', AIA_images",
"files are located aia_file_pattern = '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits' # Directory where the prepped HMI files",
"ran succesfully if return_code != 0: raise JobError(return_code, output, error, job_name = 'get_CH_map',",
"job ran succesfully if return_code != 0: raise JobError(return_code, output, error, job_name =",
"return_code != 0: raise JobError(return_code, output, error, job_name = 'classification', AIA_images = AIA_images)",
"[file_path] def create_segmented_map(AIA_images, date): '''Run the classification program''' # File path for the",
"ran succesfully if return_code != 0: raise JobError(return_code, output, error, job_name = 'tracking',",
"why: logging.warning('Could not read tracking color from file \"%s\": %s', tracking_color_file, why) return",
"errors, output: %s', output) try: with open(tracking_color_file, 'tw') as f: f.write(output) except Exception",
"maps for untracked_map in untracked_maps: tracked_maps = list(filter(lambda tracked_map: not os.path.samefile(tracked_map, untracked_map), tracked_maps))",
"just won't have HMI stats for the CH HMI_images = list() # Create",
"AIA files, in form YYYY-MM-DD') parser.add_argument('--end_date', '-e', default = datetime.utcnow().strftime('%Y-%m-%d'), help = 'End",
"return_code != 0: raise JobError(return_code, output, error, job_name = 'get_CH_map', segmented_map = segmented_map)",
"tracking_color_file, why) return 0 else: logging.debug('Found last color %s from file %s', last_color,",
"the untracked maps from the tracked maps for untracked_map in untracked_maps: tracked_maps =",
"# Directory where the prepped AIA files are located aia_file_pattern = '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits' #",
"Check if the job ran succesfully if return_code != 0: raise JobError(return_code, output,",
"sys import logging import argparse from glob import glob from datetime import datetime,",
"with the previous tracking (see maxDeltaT) tracking_overlap = 6 # The number of",
"where the prepped AIA files are located aia_file_pattern = '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits' # Directory where",
"maps = maps) else: logging.debug('Job ran without errors, output: %s', output) try: with",
"HMI_images = list() # Create the Segmented map segmented_map = create_segmented_map(AIA_images, date) #",
"__name__ == '__main__': # Get the arguments parser = argparse.ArgumentParser(description = 'Create and",
"else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s', filename=args.log_file) else: logging.basicConfig(level = logging.DEBUG if",
"job\\n%s', job) # Run the classification job return_code, output, error = job() #",
"and track CH maps') parser.add_argument('--debug', '-d', default = False, action = 'store_true', help",
"if untracked_maps is None: untracked_maps = list() else: # We need to remove",
"# Start the loop for date in date_range(start_date, end_date, classification_run_frequency): # Get the",
"the classification program''' # File path for the Segmented map to create segmented_map",
"color to file \"%s\": %s', tracking_color_file, why) return tracked_maps + untracked_maps, [], newly_tracked_maps",
"%s', tracking_run_count, len(untracked_maps)) # Track the remaing untracked maps if untracked_maps: tracked_maps, untracked_maps,",
"# Setup the logging if args.log_file: logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO,",
"return 0 else: logging.debug('Found last color %s from file %s', last_color, tracking_color_file) return",
"for the Segmented map to create segmented_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.SegmentedMap.fits') #",
"None: quality = get_quality(file_path) else: quality = get_quality(file_path, ignore_bits) # A quality of",
"help = 'End date of AIA files, in form YYYY-MM-DD') parser.add_argument('--tracked_maps', '-t', metavar",
"args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s') # Parse the start and",
"untracked maps were given, we assume none are if untracked_maps is None: untracked_maps",
"file_path = get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength)) if file_path is None: raise FileNotFoundError('AIA file for date",
"= get_CH_map_config_file, output = CH_map) logging.info('Running job\\n%s', job) # Run the get_CH_map job",
"parameters job = Job(tracking_exec, *maps, config = tracking_config_file, maxDeltaT = (tracking_overlap * classification_run_frequency).total_seconds(),",
"the logging if args.log_file: logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s :",
"Path to the tracking color file tracking_color_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt' # The minimum number",
"Path to the get_CH_map program get_CH_map_exec = '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x' # Path to the get_CH_map",
"data to run the classification program on AIA_wavelengths = [193] def date_range(start, end,",
"parser.add_argument('--tracked_maps', '-t', metavar = 'MAP', nargs='*', help = 'File paths of previously tracked",
"the tracked maps for untracked_map in untracked_maps: tracked_maps = list(filter(lambda tracked_map: not os.path.samefile(tracked_map,",
"# We will return the list of all newly tracked maps newly_tracked_maps =",
"else: return [file_path] def create_segmented_map(AIA_images, date): '''Run the classification program''' # File path",
"program classification_exec = '/opt/spoca4rwc/SPoCA/bin/classification.x' # Path to the classification program config file classification_config_file",
"Segmented map to create segmented_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.SegmentedMap.fits') # Create a",
"3 # Directory to output the maps maps_directory = '/data/spoca/spoca4rwc/quicklook/CH_maps/' # Directory where",
"the centers file classification_centers_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt' # The frequency to run the classification",
"read tracking color from file \"%s\": %s', tracking_color_file, why) return 0 else: logging.debug('Found",
"Create a job for the classification program with the appropriate parameters job =",
"from file %s', last_color, tracking_color_file) return last_color def get_good_file(file_pattern, ignore_bits = None): '''Return",
"# Path to the classification program classification_exec = '/opt/spoca4rwc/SPoCA/bin/classification.x' # Path to the",
"output = segmented_map) logging.info('Running job\\n%s', job) # Run the classification job return_code, output,",
"'''Run the get_CH_map program''' # File path for the CH map to create",
"ran without errors, output: %s', output) return CH_map def track_maps(tracked_maps, untracked_maps, newly_tracked_maps): '''Run",
"else: logging.debug('Job ran without errors, output: %s', output) return segmented_map def create_CH_map(segmented_map, date,",
"We will return the list of all newly tracked maps newly_tracked_maps = list()",
"= '/opt/spoca4rwc/scripts/AIA_CH_classification.config' # Path to the centers file classification_centers_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt' # The",
"= get_good_file(hmi_file_pattern.format(date=date)) if file_path is None: raise FileNotFoundError('HMI file for date %s was",
"import glob from datetime import datetime, timedelta from job import Job, JobError from",
"segmented_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.SegmentedMap.fits') # Create a job for the classification",
"logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s') #",
"maps_directory = '/data/spoca/spoca4rwc/quicklook/CH_maps/' # Directory where the prepped AIA files are located aia_file_pattern",
"get_AIA_files(date, wavelengths): '''Return a list of AIA files for the specified date and",
"None: raise FileNotFoundError('HMI file for date %s was not found' % date) else:",
"0 means no defect if quality == 0: return file_path else: logging.info('Skipping file",
"tracking color to file \"%s\": %s', tracking_color_file, why) return tracked_maps + untracked_maps, [],",
"of the previous tracking last_color = parse_tracking_color_file(tracking_color_file) # Create a job for the",
"AIA files for the classification try: AIA_images = get_AIA_files(date, AIA_wavelengths) except FileNotFoundError as",
"get_CH_map job return_code, output, error = job() # Check if the job ran",
"job_name = 'classification', AIA_images = AIA_images) elif not os.path.exists(segmented_map): raise JobError(message = 'Could",
"HMI files are missing, we just won't have HMI stats for the CH",
"debug') parser.add_argument('--log_file', '-l', help = 'The file path of the log file') parser.add_argument('--start_date',",
"color file tracking_color_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt' # The minimum number of files that overlaps",
"bad quality: %s', file_path, get_quality_errors(quality)) def get_AIA_files(date, wavelengths): '''Return a list of AIA",
"file \"%s\": %s', tracking_color_file, why) return 0 else: logging.debug('Found last color %s from",
"output: %s', output) return CH_map def track_maps(tracked_maps, untracked_maps, newly_tracked_maps): '''Run the tracking program'''",
"+ untracked_maps # last color of the previous tracking last_color = parse_tracking_color_file(tracking_color_file) #",
"= tracking_config_file, maxDeltaT = (tracking_overlap * classification_run_frequency).total_seconds(), newColor = last_color) logging.info('Running job\\n%s', job)",
"open(tracking_color_file, 'tw') as f: f.write(output) except Exception as why: logging.error('Could not write tracking",
"tracked_maps = None, untracked_maps = None): '''Run the SPoCA jobs to create and",
"= 'The file path of the log file') parser.add_argument('--start_date', '-s', default = '2010-05-20',",
"# File path for the CH map to create CH_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S')",
"AIA files for the specified date and wavelengths''' file_paths = list() for wavelength",
"= start.replace() while date < end: yield date date += step def parse_tracking_color_file(tracking_color_file):",
"error = job() # Check if the job ran succesfully if return_code !=",
"tracking color from file \"%s\": %s', tracking_color_file, why) return 0 else: logging.debug('Found last",
"argparse from glob import glob from datetime import datetime, timedelta from job import",
"the get_CH_map program config file get_CH_map_config_file = '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config' # Path to the tracking",
"'-s', default = '2010-05-20', help = 'Start date of AIA files, in form",
"in form YYYY-MM-DD') parser.add_argument('--end_date', '-e', default = datetime.utcnow().strftime('%Y-%m-%d'), help = 'End date of",
"for wavelength in wavelengths: file_path = get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength)) if file_path is None: raise",
"not found' % date) else: return [file_path] def create_segmented_map(AIA_images, date): '''Run the classification",
"None): '''Run the SPoCA jobs to create and track the CHMaps''' # If",
"Path to the classification program config file classification_config_file = '/opt/spoca4rwc/scripts/AIA_CH_classification.config' # Path to",
"return tracked_maps + untracked_maps, [], newly_tracked_maps + untracked_maps def run_spoca_jobs(start_date, end_date, tracked_maps =",
"the Segmented map segmented_map = create_segmented_map(AIA_images, date) # Create the CH map CH_map",
"(date, wavelengths)) else: file_paths.append(file_path) return file_paths def get_HMI_files(date): '''Return a list of HMI",
"= get_AIA_files(date, AIA_wavelengths) except FileNotFoundError as why: logging.warning('Missing AIA files for date %s,",
"okay if HMI files are missing, we just won't have HMI stats for",
"timedelta from job import Job, JobError from AIA_quality import get_quality, get_quality_errors # Path",
"file classification_centers_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt' # The frequency to run the classification program classification_run_frequency",
"= get_HMI_files(date) except FileNotFoundError as why: # It's okay if HMI files are",
"nargs='*', help = 'File paths of previously tracked CH maps') parser.add_argument('--untracked_maps', '-u', metavar",
"if return_code != 0: raise JobError(return_code, output, error, job_name = 'classification', AIA_images =",
"%s and wavelength %s was not found' % (date, wavelengths)) else: file_paths.append(file_path) return",
"for the classification program with the appropriate parameters job = Job(classification_exec, *AIA_images, config",
"6 # The number of CH maps to run the tracking program on",
"tracking_color_file) return last_color def get_good_file(file_pattern, ignore_bits = None): '''Return the first file that",
"file for date %s was not found' % date) else: return [file_path] def",
"maxDeltaT = (tracking_overlap * classification_run_frequency).total_seconds(), newColor = last_color) logging.info('Running job\\n%s', job) # Run",
"parser.add_argument('--untracked_maps', '-u', metavar = 'MAP', nargs='*', help = 'File paths of not yet",
"config = tracking_config_file, maxDeltaT = (tracking_overlap * classification_run_frequency).total_seconds(), newColor = last_color) logging.info('Running job\\n%s',",
"'tr') as f: text = f.readline() last_color = int(text.split(':')[1]) except Exception as why:",
"logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s') # Parse the",
"logging.debug('Not enough maps to run tracking, need %s but have only %s', tracking_run_count,",
"files that overlaps with the previous tracking (see maxDeltaT) tracking_overlap = 6 #",
"maps') parser.add_argument('--untracked_maps', '-u', metavar = 'MAP', nargs='*', help = 'File paths of not",
"action = 'store_true', help = 'Set the logging level to debug') parser.add_argument('--log_file', '-l',",
"if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s') # Parse the start",
"help = 'File paths of not yet tracked CH maps') args = parser.parse_args()",
"to the classification program classification_exec = '/opt/spoca4rwc/SPoCA/bin/classification.x' # Path to the classification program",
"# Start point of the script if __name__ == '__main__': # Get the",
"file_path is None: raise FileNotFoundError('HMI file for date %s was not found' %",
"find output file {segmented_map}', segmented_map = segmented_map) else: logging.debug('Job ran without errors, output:",
"were given, we assume none are if untracked_maps is None: untracked_maps = list()",
"quality of 0 means no defect if quality == 0: return file_path else:",
"%s', output) return segmented_map def create_CH_map(segmented_map, date, images): '''Run the get_CH_map program''' #",
"file_pattern and has a good quality''' for file_path in sorted(glob(file_pattern)): # Get the",
"file_path is None: raise FileNotFoundError('AIA file for date %s and wavelength %s was",
"datetime.utcnow().strftime('%Y-%m-%d'), help = 'End date of AIA files, in form YYYY-MM-DD') parser.add_argument('--tracked_maps', '-t',",
"'/data/spoca/spoca4rwc/quicklook/CH_maps/' # Directory where the prepped AIA files are located aia_file_pattern = '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits'",
"else: file_paths.append(file_path) return file_paths def get_HMI_files(date): '''Return a list of HMI files for",
"# Create the CH map CH_map = create_CH_map(segmented_map, date, AIA_images + HMI_images) #",
"file path of the log file') parser.add_argument('--start_date', '-s', default = '2010-05-20', help =",
"are if untracked_maps is None: untracked_maps = list() else: # We need to",
"try: CH_maps = run_spoca_jobs(start_date, end_date, args.tracked_maps, args.untracked_maps) except Exception as why: logging.critical(str(why)) sys.exit(1)",
"file_paths.append(file_path) return file_paths def get_HMI_files(date): '''Return a list of HMI files for the",
"list() # Create the Segmented map segmented_map = create_segmented_map(AIA_images, date) # Create the",
"# Path to the get_CH_map program config file get_CH_map_config_file = '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config' # Path",
"0: raise JobError(return_code, output, error, job_name = 'get_CH_map', segmented_map = segmented_map) elif not",
"default = False, action = 'store_true', help = 'Set the logging level to",
"the get_CH_map job return_code, output, error = job() # Check if the job",
"missing, we just won't have HMI stats for the CH HMI_images = list()",
"# Path to the get_CH_map program get_CH_map_exec = '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x' # Path to the",
"step def parse_tracking_color_file(tracking_color_file): try: with open(tracking_color_file, 'tr') as f: text = f.readline() last_color",
"no defect if quality == 0: return file_path else: logging.info('Skipping file %s with",
"create_CH_map(segmented_map, date, images): '''Run the get_CH_map program''' # File path for the CH",
"of untracked maps untracked_maps.append(CH_map) # If we have enough untracked maps, we run",
"output the maps maps_directory = '/data/spoca/spoca4rwc/quicklook/CH_maps/' # Directory where the prepped AIA files",
"the SPoCA jobs to create and track the CHMaps''' # If no tracked",
"HMI_images = get_HMI_files(date) except FileNotFoundError as why: # It's okay if HMI files",
"= (tracking_overlap * classification_run_frequency).total_seconds(), newColor = last_color) logging.info('Running job\\n%s', job) # Run the",
"# Path to the tracking program tracking_exec = '/opt/spoca4rwc/SPoCA/bin/tracking.x' # Path to the",
"logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s', filename=args.log_file) else: logging.basicConfig(level",
"return_code, output, error = job() # Check if the job ran succesfully if",
"and has a good quality''' for file_path in sorted(glob(file_pattern)): # Get the quality",
"# Create the Segmented map segmented_map = create_segmented_map(AIA_images, date) # Create the CH",
"return the list of all newly tracked maps newly_tracked_maps = list() # Start",
"defect if quality == 0: return file_path else: logging.info('Skipping file %s with bad",
"the Segmented map to create segmented_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.SegmentedMap.fits') # Create",
"logging.info('Running job\\n%s', job) # Run the get_CH_map job return_code, output, error = job()",
"is None: untracked_maps = list() else: # We need to remove the untracked",
"missing files!', date) continue # Get the list of HMI images try: HMI_images",
"as f: f.write(output) except Exception as why: logging.error('Could not write tracking color to",
"= AIA_images) elif not os.path.exists(segmented_map): raise JobError(message = 'Could not find output file",
"error, job_name = 'tracking', maps = maps) else: logging.debug('Job ran without errors, output:",
"import Job, JobError from AIA_quality import get_quality, get_quality_errors # Path to the classification",
"segmented_map = segmented_map) elif not os.path.exists(CH_map): raise JobError(message = 'Could not find output",
"for date %s was not found' % date) else: return [file_path] def create_segmented_map(AIA_images,",
"program''' # File path for the CH map to create CH_map = os.path.join(maps_directory,",
"tracking color file tracking_color_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt' # The minimum number of files that",
"program tracking_exec = '/opt/spoca4rwc/SPoCA/bin/tracking.x' # Path to the tracking program config file tracking_config_file",
"the first file that matches the file_pattern and has a good quality''' for",
"def get_AIA_files(date, wavelengths): '''Return a list of AIA files for the specified date",
"a list of HMI files for the specified date''' file_path = get_good_file(hmi_file_pattern.format(date=date)) if",
"last_color = parse_tracking_color_file(tracking_color_file) # Create a job for the tracking program with the",
"'classification', AIA_images = AIA_images) elif not os.path.exists(segmented_map): raise JobError(message = 'Could not find",
"= parse_tracking_color_file(tracking_color_file) # Create a job for the tracking program with the appropriate",
"why: logging.warning('Missing AIA files for date %s, skipping missing files!', date) continue #",
"the CH map CH_map = create_CH_map(segmented_map, date, AIA_images + HMI_images) # Add the",
"of the script if __name__ == '__main__': # Get the arguments parser =",
"the script if __name__ == '__main__': # Get the arguments parser = argparse.ArgumentParser(description",
"return last_color def get_good_file(file_pattern, ignore_bits = None): '''Return the first file that matches",
"Wavelengths of AIA data to run the classification program on AIA_wavelengths = [193]",
"import os import sys import logging import argparse from glob import glob from",
"classification program classification_exec = '/opt/spoca4rwc/SPoCA/bin/classification.x' # Path to the classification program config file",
"specified date and wavelengths''' file_paths = list() for wavelength in wavelengths: file_path =",
"the logging level to debug') parser.add_argument('--log_file', '-l', help = 'The file path of",
"list() else: # We need to remove the untracked maps from the tracked",
"prepped AIA files are located aia_file_pattern = '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits' # Directory where the prepped",
"Add the CH map to the list of untracked maps untracked_maps.append(CH_map) # If",
"untracked maps if untracked_maps: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) return newly_tracked_maps",
"program config file get_CH_map_config_file = '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config' # Path to the tracking program tracking_exec",
"'''Return the first file that matches the file_pattern and has a good quality'''",
"logging.info('Skipping file %s with bad quality: %s', file_path, get_quality_errors(quality)) def get_AIA_files(date, wavelengths): '''Return",
"program config file tracking_config_file = '/opt/spoca4rwc/scripts/AIA_CH_tracking.config' # Path to the tracking color file",
"# Run the classification job return_code, output, error = job() # Check if",
"= '/opt/spoca4rwc/SPoCA/bin/classification.x' # Path to the classification program config file classification_config_file = '/opt/spoca4rwc/scripts/AIA_CH_classification.config'",
"CH maps to run the tracking program on tracking_run_count = 3 # Directory",
"except Exception as why: logging.warning('Could not read tracking color from file \"%s\": %s',",
"= '/data/spoca/spoca4rwc/quicklook/CH_maps/' # Directory where the prepped AIA files are located aia_file_pattern =",
"maps were given, we assume none are if untracked_maps is None: untracked_maps =",
"classification program with the appropriate parameters job = Job(classification_exec, *AIA_images, config = classification_config_file,",
"the list of all newly tracked maps newly_tracked_maps = list() # Start the",
"tracking program if len(untracked_maps) >= tracking_run_count: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps)",
"where the prepped HMI files are located hmi_file_pattern = '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits' # Wavelengths of",
"quality == 0: return file_path else: logging.info('Skipping file %s with bad quality: %s',",
"we just won't have HMI stats for the CH HMI_images = list() #",
"remaing untracked maps if untracked_maps: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) return",
"config = get_CH_map_config_file, output = CH_map) logging.info('Running job\\n%s', job) # Run the get_CH_map",
"date.strftime('%Y%m%d_%H%M%S') + '.CHMap.fits') # Create a job for the get_CH_map program with the",
"the arguments parser = argparse.ArgumentParser(description = 'Create and track CH maps') parser.add_argument('--debug', '-d',",
"of not yet tracked CH maps') args = parser.parse_args() # Setup the logging",
"list of all newly tracked maps newly_tracked_maps = list() # Start the loop",
"output) return segmented_map def create_CH_map(segmented_map, date, images): '''Run the get_CH_map program''' # File",
"'%Y-%m-%d') if args.end_date else datetime.utcnow() # Run the SPoCA jobs try: CH_maps =",
"# We need to remove the untracked maps from the tracked maps for",
"sorted(glob(file_pattern)): # Get the quality of the file if ignore_bits is None: quality",
"run the tracking program on tracking_run_count = 3 # Directory to output the",
"program with the appropriate parameters job = Job(get_CH_map_exec, segmented_map, *images, config = get_CH_map_config_file,",
"newColor = last_color) logging.info('Running job\\n%s', job) # Run the tracking job return_code, output,",
"'tracking', maps = maps) else: logging.debug('Job ran without errors, output: %s', output) try:",
"# Path to the classification program config file classification_config_file = '/opt/spoca4rwc/scripts/AIA_CH_classification.config' # Path",
"= 6 # The number of CH maps to run the tracking program",
"was not found' % (date, wavelengths)) else: file_paths.append(file_path) return file_paths def get_HMI_files(date): '''Return",
"map to create segmented_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.SegmentedMap.fits') # Create a job",
"files for date %s, skipping missing files!', date) continue # Get the list",
"centersFile = classification_centers_file, output = segmented_map) logging.info('Running job\\n%s', job) # Run the classification",
"'The file path of the log file') parser.add_argument('--start_date', '-s', default = '2010-05-20', help",
"'MAP', nargs='*', help = 'File paths of not yet tracked CH maps') args",
"int(text.split(':')[1]) except Exception as why: logging.warning('Could not read tracking color from file \"%s\":",
"found' % (date, wavelengths)) else: file_paths.append(file_path) return file_paths def get_HMI_files(date): '''Return a list",
"0: raise JobError(return_code, output, error, job_name = 'classification', AIA_images = AIA_images) elif not",
"the tracking program tracking_exec = '/opt/spoca4rwc/SPoCA/bin/tracking.x' # Path to the tracking program config",
"else: logging.debug('Job ran without errors, output: %s', output) try: with open(tracking_color_file, 'tw') as",
"metavar = 'MAP', nargs='*', help = 'File paths of not yet tracked CH",
"Setup the logging if args.log_file: logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s",
"won't have HMI stats for the CH HMI_images = list() # Create the",
"= 'MAP', nargs='*', help = 'File paths of previously tracked CH maps') parser.add_argument('--untracked_maps',",
"wavelengths: file_path = get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength)) if file_path is None: raise FileNotFoundError('AIA file for",
"the classification try: AIA_images = get_AIA_files(date, AIA_wavelengths) except FileNotFoundError as why: logging.warning('Missing AIA",
"the maps maps_directory = '/data/spoca/spoca4rwc/quicklook/CH_maps/' # Directory where the prepped AIA files are",
"False, action = 'store_true', help = 'Set the logging level to debug') parser.add_argument('--log_file',",
"The number of CH maps to run the tracking program on tracking_run_count =",
"untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) return newly_tracked_maps # Start point of the",
"A quality of 0 means no defect if quality == 0: return file_path",
"to the tracking color file tracking_color_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt' # The minimum number of",
"os.path.exists(segmented_map): raise JobError(message = 'Could not find output file {segmented_map}', segmented_map = segmented_map)",
"'-e', default = datetime.utcnow().strftime('%Y-%m-%d'), help = 'End date of AIA files, in form",
"the get_CH_map program get_CH_map_exec = '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x' # Path to the get_CH_map program config",
"Run the classification job return_code, output, error = job() # Check if the",
"run the tracking on maps = tracked_maps[-tracking_overlap:] + untracked_maps # last color of",
"centers file classification_centers_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt' # The frequency to run the classification program",
"+ untracked_maps, [], newly_tracked_maps + untracked_maps def run_spoca_jobs(start_date, end_date, tracked_maps = None, untracked_maps",
"have only %s', tracking_run_count, len(untracked_maps)) # Track the remaing untracked maps if untracked_maps:",
"map segmented_map = create_segmented_map(AIA_images, date) # Create the CH map CH_map = create_CH_map(segmented_map,",
"on maps = tracked_maps[-tracking_overlap:] + untracked_maps # last color of the previous tracking",
"If no untracked maps were given, we assume none are if untracked_maps is",
"%s was not found' % (date, wavelengths)) else: file_paths.append(file_path) return file_paths def get_HMI_files(date):",
"while date < end: yield date date += step def parse_tracking_color_file(tracking_color_file): try: with",
"tracking (see maxDeltaT) tracking_overlap = 6 # The number of CH maps to",
"errors, output: %s', output) return segmented_map def create_CH_map(segmented_map, date, images): '''Run the get_CH_map",
"map to the list of untracked maps untracked_maps.append(CH_map) # If we have enough",
"\"%s\": %s', tracking_color_file, why) return 0 else: logging.debug('Found last color %s from file",
"= '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt' # The frequency to run the classification program classification_run_frequency = timedelta(hours",
"the file_pattern and has a good quality''' for file_path in sorted(glob(file_pattern)): # Get",
"as why: # It's okay if HMI files are missing, we just won't",
"%s', file_path, get_quality_errors(quality)) def get_AIA_files(date, wavelengths): '''Return a list of AIA files for",
"tracking program on tracking_run_count = 3 # Directory to output the maps maps_directory",
"# Add the CH map to the list of untracked maps untracked_maps.append(CH_map) #",
"if HMI files are missing, we just won't have HMI stats for the",
"os.path.exists(CH_map): raise JobError(message = 'Could not find output file {CH_map}', CH_map = CH_map)",
"classification program config file classification_config_file = '/opt/spoca4rwc/scripts/AIA_CH_classification.config' # Path to the centers file",
"continue # Get the list of HMI images try: HMI_images = get_HMI_files(date) except",
"datetime.strptime(args.end_date, '%Y-%m-%d') if args.end_date else datetime.utcnow() # Run the SPoCA jobs try: CH_maps",
"enough maps to run tracking, need %s but have only %s', tracking_run_count, len(untracked_maps))",
"list of AIA files for the specified date and wavelengths''' file_paths = list()",
"error, job_name = 'classification', AIA_images = AIA_images) elif not os.path.exists(segmented_map): raise JobError(message =",
"the maps to run the tracking on maps = tracked_maps[-tracking_overlap:] + untracked_maps #",
"create_segmented_map(AIA_images, date) # Create the CH map CH_map = create_CH_map(segmented_map, date, AIA_images +",
"'/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt' # The frequency to run the classification program classification_run_frequency = timedelta(hours =",
"tracking last_color = parse_tracking_color_file(tracking_color_file) # Create a job for the tracking program with",
"# Get the arguments parser = argparse.ArgumentParser(description = 'Create and track CH maps')",
"start_date = datetime.strptime(args.start_date, '%Y-%m-%d') end_date = datetime.strptime(args.end_date, '%Y-%m-%d') if args.end_date else datetime.utcnow() #",
"the SPoCA jobs try: CH_maps = run_spoca_jobs(start_date, end_date, args.tracked_maps, args.untracked_maps) except Exception as",
"os import sys import logging import argparse from glob import glob from datetime",
"ignore_bits) # A quality of 0 means no defect if quality == 0:",
"tracking program with the appropriate parameters job = Job(tracking_exec, *maps, config = tracking_config_file,",
"Job(get_CH_map_exec, segmented_map, *images, config = get_CH_map_config_file, output = CH_map) logging.info('Running job\\n%s', job) #",
"program if len(untracked_maps) >= tracking_run_count: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) else:",
"untracked_maps, newly_tracked_maps) else: logging.debug('Not enough maps to run tracking, need %s but have",
"last_color, tracking_color_file) return last_color def get_good_file(file_pattern, ignore_bits = None): '''Return the first file",
"file if ignore_bits is None: quality = get_quality(file_path) else: quality = get_quality(file_path, ignore_bits)",
"!= 0: raise JobError(return_code, output, error, job_name = 'classification', AIA_images = AIA_images) elif",
"config file tracking_config_file = '/opt/spoca4rwc/scripts/AIA_CH_tracking.config' # Path to the tracking color file tracking_color_file",
"get_CH_map program with the appropriate parameters job = Job(get_CH_map_exec, segmented_map, *images, config =",
"logging.debug('Job ran without errors, output: %s', output) return CH_map def track_maps(tracked_maps, untracked_maps, newly_tracked_maps):",
"# Get the AIA files for the classification try: AIA_images = get_AIA_files(date, AIA_wavelengths)",
"write tracking color to file \"%s\": %s', tracking_color_file, why) return tracked_maps + untracked_maps,",
"tracked maps newly_tracked_maps = list() # Start the loop for date in date_range(start_date,",
"FileNotFoundError as why: # It's okay if HMI files are missing, we just",
"Path to the classification program classification_exec = '/opt/spoca4rwc/SPoCA/bin/classification.x' # Path to the classification",
"args.end_date else datetime.utcnow() # Run the SPoCA jobs try: CH_maps = run_spoca_jobs(start_date, end_date,",
"= 'Could not find output file {CH_map}', CH_map = CH_map) else: logging.debug('Job ran",
"= list() for wavelength in wavelengths: file_path = get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength)) if file_path is",
"date): '''Run the classification program''' # File path for the Segmented map to",
"argparse.ArgumentParser(description = 'Create and track CH maps') parser.add_argument('--debug', '-d', default = False, action",
"'-l', help = 'The file path of the log file') parser.add_argument('--start_date', '-s', default",
"job for the classification program with the appropriate parameters job = Job(classification_exec, *AIA_images,",
"untracked_maps, [], newly_tracked_maps + untracked_maps def run_spoca_jobs(start_date, end_date, tracked_maps = None, untracked_maps =",
"track CH maps') parser.add_argument('--debug', '-d', default = False, action = 'store_true', help =",
"JobError(return_code, output, error, job_name = 'tracking', maps = maps) else: logging.debug('Job ran without",
"script if __name__ == '__main__': # Get the arguments parser = argparse.ArgumentParser(description =",
"'MAP', nargs='*', help = 'File paths of previously tracked CH maps') parser.add_argument('--untracked_maps', '-u',",
"list of HMI files for the specified date''' file_path = get_good_file(hmi_file_pattern.format(date=date)) if file_path",
"quality''' for file_path in sorted(glob(file_pattern)): # Get the quality of the file if",
"# Create a job for the classification program with the appropriate parameters job",
"create_segmented_map(AIA_images, date): '''Run the classification program''' # File path for the Segmented map",
"error, job_name = 'get_CH_map', segmented_map = segmented_map) elif not os.path.exists(CH_map): raise JobError(message =",
"# Run the SPoCA jobs try: CH_maps = run_spoca_jobs(start_date, end_date, args.tracked_maps, args.untracked_maps) except",
"'Create and track CH maps') parser.add_argument('--debug', '-d', default = False, action = 'store_true',",
"date''' file_path = get_good_file(hmi_file_pattern.format(date=date)) if file_path is None: raise FileNotFoundError('HMI file for date",
"date %s, skipping missing files!', date) continue # Get the list of HMI",
"for untracked_map in untracked_maps: tracked_maps = list(filter(lambda tracked_map: not os.path.samefile(tracked_map, untracked_map), tracked_maps)) #",
"for date''' date = start.replace() while date < end: yield date date +=",
"File paths of the maps to run the tracking on maps = tracked_maps[-tracking_overlap:]",
"Create a job for the get_CH_map program with the appropriate parameters job =",
"= sorted(glob(os.path.join(maps_directory, '*.CHMap.fits'))) # If no untracked maps were given, we assume none",
"maps maps_directory = '/data/spoca/spoca4rwc/quicklook/CH_maps/' # Directory where the prepped AIA files are located",
"of the file if ignore_bits is None: quality = get_quality(file_path) else: quality =",
"to remove the untracked maps from the tracked maps for untracked_map in untracked_maps:",
"track_maps(tracked_maps, untracked_maps, newly_tracked_maps): '''Run the tracking program''' # File paths of the maps",
"= track_maps(tracked_maps, untracked_maps, newly_tracked_maps) return newly_tracked_maps # Start point of the script if",
"the get_CH_map program with the appropriate parameters job = Job(get_CH_map_exec, segmented_map, *images, config",
"def parse_tracking_color_file(tracking_color_file): try: with open(tracking_color_file, 'tr') as f: text = f.readline() last_color =",
"= Job(classification_exec, *AIA_images, config = classification_config_file, centersFile = classification_centers_file, output = segmented_map) logging.info('Running",
"color from file \"%s\": %s', tracking_color_file, why) return 0 else: logging.debug('Found last color",
"the tracking on maps = tracked_maps[-tracking_overlap:] + untracked_maps # last color of the",
"track the CHMaps''' # If no tracked maps were given, we assumed all",
"'__main__': # Get the arguments parser = argparse.ArgumentParser(description = 'Create and track CH",
"'/opt/spoca4rwc/SPoCA/bin/get_CH_map.x' # Path to the get_CH_map program config file get_CH_map_config_file = '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config' #",
"file tracking_config_file = '/opt/spoca4rwc/scripts/AIA_CH_tracking.config' # Path to the tracking color file tracking_color_file =",
"output = CH_map) logging.info('Running job\\n%s', job) # Run the get_CH_map job return_code, output,",
"list() # Start the loop for date in date_range(start_date, end_date, classification_run_frequency): # Get",
"with the appropriate parameters job = Job(get_CH_map_exec, segmented_map, *images, config = get_CH_map_config_file, output",
"else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s') # Parse the start and end",
"of AIA data to run the classification program on AIA_wavelengths = [193] def",
"AIA data to run the classification program on AIA_wavelengths = [193] def date_range(start,",
"= argparse.ArgumentParser(description = 'Create and track CH maps') parser.add_argument('--debug', '-d', default = False,",
"with the appropriate parameters job = Job(classification_exec, *AIA_images, config = classification_config_file, centersFile =",
"the log file') parser.add_argument('--start_date', '-s', default = '2010-05-20', help = 'Start date of",
"args = parser.parse_args() # Setup the logging if args.log_file: logging.basicConfig(level = logging.DEBUG if",
"= False, action = 'store_true', help = 'Set the logging level to debug')",
"return newly_tracked_maps # Start point of the script if __name__ == '__main__': #",
"date''' date = start.replace() while date < end: yield date date += step",
"%s', tracking_color_file, why) return 0 else: logging.debug('Found last color %s from file %s',",
"nargs='*', help = 'File paths of not yet tracked CH maps') args =",
"= create_segmented_map(AIA_images, date) # Create the CH map CH_map = create_CH_map(segmented_map, date, AIA_images",
"os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.SegmentedMap.fits') # Create a job for the classification program with",
"good quality''' for file_path in sorted(glob(file_pattern)): # Get the quality of the file",
"JobError from AIA_quality import get_quality, get_quality_errors # Path to the classification program classification_exec",
"to run the tracking on maps = tracked_maps[-tracking_overlap:] + untracked_maps # last color",
"Segmented map segmented_map = create_segmented_map(AIA_images, date) # Create the CH map CH_map =",
"help = 'File paths of previously tracked CH maps') parser.add_argument('--untracked_maps', '-u', metavar =",
"of HMI files for the specified date''' file_path = get_good_file(hmi_file_pattern.format(date=date)) if file_path is",
"maps to run tracking, need %s but have only %s', tracking_run_count, len(untracked_maps)) #",
"not find output file {CH_map}', CH_map = CH_map) else: logging.debug('Job ran without errors,",
"AIA_wavelengths = [193] def date_range(start, end, step): '''Equivalent to range for date''' date",
"date, images): '''Run the get_CH_map program''' # File path for the CH map",
"end: yield date date += step def parse_tracking_color_file(tracking_color_file): try: with open(tracking_color_file, 'tr') as",
"open(tracking_color_file, 'tr') as f: text = f.readline() last_color = int(text.split(':')[1]) except Exception as",
"color of the previous tracking last_color = parse_tracking_color_file(tracking_color_file) # Create a job for",
"the appropriate parameters job = Job(classification_exec, *AIA_images, config = classification_config_file, centersFile = classification_centers_file,",
"the classification program on AIA_wavelengths = [193] def date_range(start, end, step): '''Equivalent to",
"raise JobError(return_code, output, error, job_name = 'get_CH_map', segmented_map = segmented_map) elif not os.path.exists(CH_map):",
"= list() # Start the loop for date in date_range(start_date, end_date, classification_run_frequency): #",
"date_range(start_date, end_date, classification_run_frequency): # Get the AIA files for the classification try: AIA_images",
"to debug') parser.add_argument('--log_file', '-l', help = 'The file path of the log file')",
"a job for the get_CH_map program with the appropriate parameters job = Job(get_CH_map_exec,",
"+= step def parse_tracking_color_file(tracking_color_file): try: with open(tracking_color_file, 'tr') as f: text = f.readline()",
"the classification program with the appropriate parameters job = Job(classification_exec, *AIA_images, config =",
"maps, we run the tracking program if len(untracked_maps) >= tracking_run_count: tracked_maps, untracked_maps, newly_tracked_maps",
"= logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s') # Parse",
"quality = get_quality(file_path) else: quality = get_quality(file_path, ignore_bits) # A quality of 0",
"else: logging.debug('Not enough maps to run tracking, need %s but have only %s',",
"args.log_file: logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s',",
"= create_CH_map(segmented_map, date, AIA_images + HMI_images) # Add the CH map to the",
"ran without errors, output: %s', output) return segmented_map def create_CH_map(segmented_map, date, images): '''Run",
"(tracking_overlap * classification_run_frequency).total_seconds(), newColor = last_color) logging.info('Running job\\n%s', job) # Run the tracking",
"= '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits' # Directory where the prepped HMI files are located hmi_file_pattern =",
"on AIA_wavelengths = [193] def date_range(start, end, step): '''Equivalent to range for date'''",
"Path to the get_CH_map program config file get_CH_map_config_file = '/opt/spoca4rwc/scripts/AIA_CH_get_CH_map.config' # Path to",
"# Run the tracking job return_code, output, error = job() # Check if",
"but have only %s', tracking_run_count, len(untracked_maps)) # Track the remaing untracked maps if",
"parser.add_argument('--debug', '-d', default = False, action = 'store_true', help = 'Set the logging",
"wavelengths''' file_paths = list() for wavelength in wavelengths: file_path = get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength)) if",
"classification_centers_file, output = segmented_map) logging.info('Running job\\n%s', job) # Run the classification job return_code,",
"tracking_exec = '/opt/spoca4rwc/SPoCA/bin/tracking.x' # Path to the tracking program config file tracking_config_file =",
"text = f.readline() last_color = int(text.split(':')[1]) except Exception as why: logging.warning('Could not read",
"enough untracked maps, we run the tracking program if len(untracked_maps) >= tracking_run_count: tracked_maps,",
"glob import glob from datetime import datetime, timedelta from job import Job, JobError",
"form YYYY-MM-DD') parser.add_argument('--end_date', '-e', default = datetime.utcnow().strftime('%Y-%m-%d'), help = 'End date of AIA",
"we assumed all existing are if tracked_maps is None: tracked_maps = sorted(glob(os.path.join(maps_directory, '*.CHMap.fits')))",
"program''' # File path for the Segmented map to create segmented_map = os.path.join(maps_directory,",
"for file_path in sorted(glob(file_pattern)): # Get the quality of the file if ignore_bits",
"parser.add_argument('--log_file', '-l', help = 'The file path of the log file') parser.add_argument('--start_date', '-s',",
"if args.log_file: logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s :",
"sorted(glob(os.path.join(maps_directory, '*.CHMap.fits'))) # If no untracked maps were given, we assume none are",
"*AIA_images, config = classification_config_file, centersFile = classification_centers_file, output = segmented_map) logging.info('Running job\\n%s', job)",
"newly_tracked_maps) else: logging.debug('Not enough maps to run tracking, need %s but have only",
"level to debug') parser.add_argument('--log_file', '-l', help = 'The file path of the log",
"= '2010-05-20', help = 'Start date of AIA files, in form YYYY-MM-DD') parser.add_argument('--end_date',",
"output file {segmented_map}', segmented_map = segmented_map) else: logging.debug('Job ran without errors, output: %s',",
"from AIA_quality import get_quality, get_quality_errors # Path to the classification program classification_exec =",
"job = Job(tracking_exec, *maps, config = tracking_config_file, maxDeltaT = (tracking_overlap * classification_run_frequency).total_seconds(), newColor",
"get_AIA_files(date, AIA_wavelengths) except FileNotFoundError as why: logging.warning('Missing AIA files for date %s, skipping",
"= datetime.strptime(args.start_date, '%Y-%m-%d') end_date = datetime.strptime(args.end_date, '%Y-%m-%d') if args.end_date else datetime.utcnow() # Run",
"tracking_color_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt' # The minimum number of files that overlaps with the",
"'Could not find output file {segmented_map}', segmented_map = segmented_map) else: logging.debug('Job ran without",
"aia_file_pattern = '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits' # Directory where the prepped HMI files are located hmi_file_pattern",
"return file_paths def get_HMI_files(date): '''Return a list of HMI files for the specified",
"= '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits' # Wavelengths of AIA data to run the classification program on",
"of files that overlaps with the previous tracking (see maxDeltaT) tracking_overlap = 6",
"get_good_file(file_pattern, ignore_bits = None): '''Return the first file that matches the file_pattern and",
"classification_exec = '/opt/spoca4rwc/SPoCA/bin/classification.x' # Path to the classification program config file classification_config_file =",
"= 4) # Path to the get_CH_map program get_CH_map_exec = '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x' # Path",
"quality = get_quality(file_path, ignore_bits) # A quality of 0 means no defect if",
"have HMI stats for the CH HMI_images = list() # Create the Segmented",
"return CH_map def track_maps(tracked_maps, untracked_maps, newly_tracked_maps): '''Run the tracking program''' # File paths",
"AIA files are located aia_file_pattern = '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits' # Directory where the prepped HMI",
"path of the log file') parser.add_argument('--start_date', '-s', default = '2010-05-20', help = 'Start",
"== 0: return file_path else: logging.info('Skipping file %s with bad quality: %s', file_path,",
"{segmented_map}', segmented_map = segmented_map) else: logging.debug('Job ran without errors, output: %s', output) return",
"if quality == 0: return file_path else: logging.info('Skipping file %s with bad quality:",
"None: raise FileNotFoundError('AIA file for date %s and wavelength %s was not found'",
"segmented_map) logging.info('Running job\\n%s', job) # Run the classification job return_code, output, error =",
": %(levelname)-8s : %(message)s') # Parse the start and end date start_date =",
"files, in form YYYY-MM-DD') parser.add_argument('--end_date', '-e', default = datetime.utcnow().strftime('%Y-%m-%d'), help = 'End date",
"if file_path is None: raise FileNotFoundError('AIA file for date %s and wavelength %s",
"= tracked_maps[-tracking_overlap:] + untracked_maps # last color of the previous tracking last_color =",
"the tracking program with the appropriate parameters job = Job(tracking_exec, *maps, config =",
"except FileNotFoundError as why: # It's okay if HMI files are missing, we",
"AIA_images + HMI_images) # Add the CH map to the list of untracked",
"parser.add_argument('--start_date', '-s', default = '2010-05-20', help = 'Start date of AIA files, in",
"the classification program config file classification_config_file = '/opt/spoca4rwc/scripts/AIA_CH_classification.config' # Path to the centers",
"= 'classification', AIA_images = AIA_images) elif not os.path.exists(segmented_map): raise JobError(message = 'Could not",
"for the classification try: AIA_images = get_AIA_files(date, AIA_wavelengths) except FileNotFoundError as why: logging.warning('Missing",
"to the classification program config file classification_config_file = '/opt/spoca4rwc/scripts/AIA_CH_classification.config' # Path to the",
"date < end: yield date date += step def parse_tracking_color_file(tracking_color_file): try: with open(tracking_color_file,",
"job for the tracking program with the appropriate parameters job = Job(tracking_exec, *maps,",
"output, error, job_name = 'get_CH_map', segmented_map = segmented_map) elif not os.path.exists(CH_map): raise JobError(message",
"'-t', metavar = 'MAP', nargs='*', help = 'File paths of previously tracked CH",
"not os.path.exists(CH_map): raise JobError(message = 'Could not find output file {CH_map}', CH_map =",
"tracking program''' # File paths of the maps to run the tracking on",
"matches the file_pattern and has a good quality''' for file_path in sorted(glob(file_pattern)): #",
"classification_run_frequency): # Get the AIA files for the classification try: AIA_images = get_AIA_files(date,",
"tracked CH maps') args = parser.parse_args() # Setup the logging if args.log_file: logging.basicConfig(level",
"# Wavelengths of AIA data to run the classification program on AIA_wavelengths =",
"date_range(start, end, step): '''Equivalent to range for date''' date = start.replace() while date",
"wavelength %s was not found' % (date, wavelengths)) else: file_paths.append(file_path) return file_paths def",
"files are missing, we just won't have HMI stats for the CH HMI_images",
"tracking_run_count = 3 # Directory to output the maps maps_directory = '/data/spoca/spoca4rwc/quicklook/CH_maps/' #",
"arguments parser = argparse.ArgumentParser(description = 'Create and track CH maps') parser.add_argument('--debug', '-d', default",
"'Start date of AIA files, in form YYYY-MM-DD') parser.add_argument('--end_date', '-e', default = datetime.utcnow().strftime('%Y-%m-%d'),",
"'%Y-%m-%d') end_date = datetime.strptime(args.end_date, '%Y-%m-%d') if args.end_date else datetime.utcnow() # Run the SPoCA",
"= 'Set the logging level to debug') parser.add_argument('--log_file', '-l', help = 'The file",
"path for the Segmented map to create segmented_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.SegmentedMap.fits')",
"= segmented_map) elif not os.path.exists(CH_map): raise JobError(message = 'Could not find output file",
"JobError(message = 'Could not find output file {segmented_map}', segmented_map = segmented_map) else: logging.debug('Job",
"find output file {CH_map}', CH_map = CH_map) else: logging.debug('Job ran without errors, output:",
"Create a job for the tracking program with the appropriate parameters job =",
"overlaps with the previous tracking (see maxDeltaT) tracking_overlap = 6 # The number",
"run tracking, need %s but have only %s', tracking_run_count, len(untracked_maps)) # Track the",
"Directory where the prepped AIA files are located aia_file_pattern = '/data/SDO/public/AIA_quicklook/{wavelength:04d}/{date.year:04d}/{date.month:02d}/{date.day:02d}/H{date.hour:02d}00/AIA.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.{wavelength:04d}.*.fits' # Directory",
"args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s', filename=args.log_file) else: logging.basicConfig(level = logging.DEBUG",
"for the specified date and wavelengths''' file_paths = list() for wavelength in wavelengths:",
"glob from datetime import datetime, timedelta from job import Job, JobError from AIA_quality",
"job\\n%s', job) # Run the get_CH_map job return_code, output, error = job() #",
"f: text = f.readline() last_color = int(text.split(':')[1]) except Exception as why: logging.warning('Could not",
"logging.debug('Job ran without errors, output: %s', output) return segmented_map def create_CH_map(segmented_map, date, images):",
"errors, output: %s', output) return CH_map def track_maps(tracked_maps, untracked_maps, newly_tracked_maps): '''Run the tracking",
"+ HMI_images) # Add the CH map to the list of untracked maps",
"that overlaps with the previous tracking (see maxDeltaT) tracking_overlap = 6 # The",
"else: # We need to remove the untracked maps from the tracked maps",
"We need to remove the untracked maps from the tracked maps for untracked_map",
"else: logging.debug('Found last color %s from file %s', last_color, tracking_color_file) return last_color def",
"list(filter(lambda tracked_map: not os.path.samefile(tracked_map, untracked_map), tracked_maps)) # We will return the list of",
"maps untracked_maps.append(CH_map) # If we have enough untracked maps, we run the tracking",
"f: f.write(output) except Exception as why: logging.error('Could not write tracking color to file",
"Path to the tracking program config file tracking_config_file = '/opt/spoca4rwc/scripts/AIA_CH_tracking.config' # Path to",
"as why: logging.warning('Missing AIA files for date %s, skipping missing files!', date) continue",
"get_good_file(hmi_file_pattern.format(date=date)) if file_path is None: raise FileNotFoundError('HMI file for date %s was not",
"date %s was not found' % date) else: return [file_path] def create_segmented_map(AIA_images, date):",
"of previously tracked CH maps') parser.add_argument('--untracked_maps', '-u', metavar = 'MAP', nargs='*', help =",
"untracked_maps is None: untracked_maps = list() else: # We need to remove the",
"classification_config_file = '/opt/spoca4rwc/scripts/AIA_CH_classification.config' # Path to the centers file classification_centers_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt' #",
"Create the Segmented map segmented_map = create_segmented_map(AIA_images, date) # Create the CH map",
"images try: HMI_images = get_HMI_files(date) except FileNotFoundError as why: # It's okay if",
"get_CH_map_exec = '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x' # Path to the get_CH_map program config file get_CH_map_config_file =",
"end_date = datetime.strptime(args.end_date, '%Y-%m-%d') if args.end_date else datetime.utcnow() # Run the SPoCA jobs",
"date and wavelengths''' file_paths = list() for wavelength in wavelengths: file_path = get_good_file(aia_file_pattern.format(date=date,",
"date %s and wavelength %s was not found' % (date, wavelengths)) else: file_paths.append(file_path)",
"a list of AIA files for the specified date and wavelengths''' file_paths =",
"images): '''Run the get_CH_map program''' # File path for the CH map to",
"file') parser.add_argument('--start_date', '-s', default = '2010-05-20', help = 'Start date of AIA files,",
"create_CH_map(segmented_map, date, AIA_images + HMI_images) # Add the CH map to the list",
"to create CH_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.CHMap.fits') # Create a job for",
"job) # Run the tracking job return_code, output, error = job() # Check",
"+ untracked_maps def run_spoca_jobs(start_date, end_date, tracked_maps = None, untracked_maps = None): '''Run the",
"SPoCA jobs try: CH_maps = run_spoca_jobs(start_date, end_date, args.tracked_maps, args.untracked_maps) except Exception as why:",
"timedelta(hours = 4) # Path to the get_CH_map program get_CH_map_exec = '/opt/spoca4rwc/SPoCA/bin/get_CH_map.x' #",
"output, error = job() # Check if the job ran succesfully if return_code",
"step): '''Equivalent to range for date''' date = start.replace() while date < end:",
"'Could not find output file {CH_map}', CH_map = CH_map) else: logging.debug('Job ran without",
"= track_maps(tracked_maps, untracked_maps, newly_tracked_maps) else: logging.debug('Not enough maps to run tracking, need %s",
"the classification program classification_exec = '/opt/spoca4rwc/SPoCA/bin/classification.x' # Path to the classification program config",
"raise FileNotFoundError('HMI file for date %s was not found' % date) else: return",
"Path to the tracking program tracking_exec = '/opt/spoca4rwc/SPoCA/bin/tracking.x' # Path to the tracking",
"return [file_path] def create_segmented_map(AIA_images, date): '''Run the classification program''' # File path for",
"untracked_maps # last color of the previous tracking last_color = parse_tracking_color_file(tracking_color_file) # Create",
"for the CH HMI_images = list() # Create the Segmented map segmented_map =",
"tracking_config_file, maxDeltaT = (tracking_overlap * classification_run_frequency).total_seconds(), newColor = last_color) logging.info('Running job\\n%s', job) #",
"= get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength)) if file_path is None: raise FileNotFoundError('AIA file for date %s",
"file_path = get_good_file(hmi_file_pattern.format(date=date)) if file_path is None: raise FileNotFoundError('HMI file for date %s",
"Start point of the script if __name__ == '__main__': # Get the arguments",
"the loop for date in date_range(start_date, end_date, classification_run_frequency): # Get the AIA files",
"# Create a job for the get_CH_map program with the appropriate parameters job",
"file for date %s and wavelength %s was not found' % (date, wavelengths))",
"end_date, tracked_maps = None, untracked_maps = None): '''Run the SPoCA jobs to create",
"files for the specified date and wavelengths''' file_paths = list() for wavelength in",
"'/opt/spoca4rwc/SPoCA/bin/tracking.x' # Path to the tracking program config file tracking_config_file = '/opt/spoca4rwc/scripts/AIA_CH_tracking.config' #",
"logging.warning('Could not read tracking color from file \"%s\": %s', tracking_color_file, why) return 0",
"job() # Check if the job ran succesfully if return_code != 0: raise",
"tracking on maps = tracked_maps[-tracking_overlap:] + untracked_maps # last color of the previous",
"elif not os.path.exists(CH_map): raise JobError(message = 'Could not find output file {CH_map}', CH_map",
"from glob import glob from datetime import datetime, timedelta from job import Job,",
"HMI files are located hmi_file_pattern = '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits' # Wavelengths of AIA data to",
"def date_range(start, end, step): '''Equivalent to range for date''' date = start.replace() while",
"default = datetime.utcnow().strftime('%Y-%m-%d'), help = 'End date of AIA files, in form YYYY-MM-DD')",
"output) try: with open(tracking_color_file, 'tw') as f: f.write(output) except Exception as why: logging.error('Could",
"# Directory where the prepped HMI files are located hmi_file_pattern = '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits' #",
"run the classification program on AIA_wavelengths = [193] def date_range(start, end, step): '''Equivalent",
"job return_code, output, error = job() # Check if the job ran succesfully",
"Directory to output the maps maps_directory = '/data/spoca/spoca4rwc/quicklook/CH_maps/' # Directory where the prepped",
"JobError(return_code, output, error, job_name = 'classification', AIA_images = AIA_images) elif not os.path.exists(segmented_map): raise",
"raise JobError(return_code, output, error, job_name = 'classification', AIA_images = AIA_images) elif not os.path.exists(segmented_map):",
"tracked_maps[-tracking_overlap:] + untracked_maps # last color of the previous tracking last_color = parse_tracking_color_file(tracking_color_file)",
"File path for the Segmented map to create segmented_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') +",
"yield date date += step def parse_tracking_color_file(tracking_color_file): try: with open(tracking_color_file, 'tr') as f:",
"untracked_maps, newly_tracked_maps): '''Run the tracking program''' # File paths of the maps to",
"untracked maps untracked_maps.append(CH_map) # If we have enough untracked maps, we run the",
"parser.parse_args() # Setup the logging if args.log_file: logging.basicConfig(level = logging.DEBUG if args.debug else",
"ran succesfully if return_code != 0: raise JobError(return_code, output, error, job_name = 'classification',",
"why) return tracked_maps + untracked_maps, [], newly_tracked_maps + untracked_maps def run_spoca_jobs(start_date, end_date, tracked_maps",
"as f: text = f.readline() last_color = int(text.split(':')[1]) except Exception as why: logging.warning('Could",
"%(levelname)-8s : %(message)s', filename=args.log_file) else: logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s",
"raise JobError(return_code, output, error, job_name = 'tracking', maps = maps) else: logging.debug('Job ran",
"last_color def get_good_file(file_pattern, ignore_bits = None): '''Return the first file that matches the",
"for the CH map to create CH_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.CHMap.fits') #",
"number of CH maps to run the tracking program on tracking_run_count = 3",
"%s', output) return CH_map def track_maps(tracked_maps, untracked_maps, newly_tracked_maps): '''Run the tracking program''' #",
"datetime, timedelta from job import Job, JobError from AIA_quality import get_quality, get_quality_errors #",
"'.SegmentedMap.fits') # Create a job for the classification program with the appropriate parameters",
"without errors, output: %s', output) try: with open(tracking_color_file, 'tw') as f: f.write(output) except",
"assume none are if untracked_maps is None: untracked_maps = list() else: # We",
"remove the untracked maps from the tracked maps for untracked_map in untracked_maps: tracked_maps",
"why: # It's okay if HMI files are missing, we just won't have",
"CH HMI_images = list() # Create the Segmented map segmented_map = create_segmented_map(AIA_images, date)",
"classification_centers_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt' # The frequency to run the classification program classification_run_frequency =",
"newly tracked maps newly_tracked_maps = list() # Start the loop for date in",
"if __name__ == '__main__': # Get the arguments parser = argparse.ArgumentParser(description = 'Create",
"if len(untracked_maps) >= tracking_run_count: tracked_maps, untracked_maps, newly_tracked_maps = track_maps(tracked_maps, untracked_maps, newly_tracked_maps) else: logging.debug('Not",
"jobs try: CH_maps = run_spoca_jobs(start_date, end_date, args.tracked_maps, args.untracked_maps) except Exception as why: logging.critical(str(why))",
"to run the classification program classification_run_frequency = timedelta(hours = 4) # Path to",
"= '/opt/spoca4rwc/scripts/AIA_CH_tracking.config' # Path to the tracking color file tracking_color_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt' #",
"import argparse from glob import glob from datetime import datetime, timedelta from job",
"not write tracking color to file \"%s\": %s', tracking_color_file, why) return tracked_maps +",
"tracked_maps = sorted(glob(os.path.join(maps_directory, '*.CHMap.fits'))) # If no untracked maps were given, we assume",
"Job, JobError from AIA_quality import get_quality, get_quality_errors # Path to the classification program",
"succesfully if return_code != 0: raise JobError(return_code, output, error, job_name = 'get_CH_map', segmented_map",
"AIA files for date %s, skipping missing files!', date) continue # Get the",
"quality: %s', file_path, get_quality_errors(quality)) def get_AIA_files(date, wavelengths): '''Return a list of AIA files",
"untracked_maps = None): '''Run the SPoCA jobs to create and track the CHMaps'''",
"%(message)s') # Parse the start and end date start_date = datetime.strptime(args.start_date, '%Y-%m-%d') end_date",
"file_path else: logging.info('Skipping file %s with bad quality: %s', file_path, get_quality_errors(quality)) def get_AIA_files(date,",
"the tracking job return_code, output, error = job() # Check if the job",
"JobError(return_code, output, error, job_name = 'get_CH_map', segmented_map = segmented_map) elif not os.path.exists(CH_map): raise",
"wavelength=wavelength)) if file_path is None: raise FileNotFoundError('AIA file for date %s and wavelength",
"def get_good_file(file_pattern, ignore_bits = None): '''Return the first file that matches the file_pattern",
"tracking_color_file, why) return tracked_maps + untracked_maps, [], newly_tracked_maps + untracked_maps def run_spoca_jobs(start_date, end_date,",
"to range for date''' date = start.replace() while date < end: yield date",
"# Path to the tracking color file tracking_color_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt' # The minimum",
"logging if args.log_file: logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s",
"if ignore_bits is None: quality = get_quality(file_path) else: quality = get_quality(file_path, ignore_bits) #",
"to create segmented_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.SegmentedMap.fits') # Create a job for",
"'*.CHMap.fits'))) # If no untracked maps were given, we assume none are if",
"the start and end date start_date = datetime.strptime(args.start_date, '%Y-%m-%d') end_date = datetime.strptime(args.end_date, '%Y-%m-%d')",
"date start_date = datetime.strptime(args.start_date, '%Y-%m-%d') end_date = datetime.strptime(args.end_date, '%Y-%m-%d') if args.end_date else datetime.utcnow()",
"else: logging.debug('Job ran without errors, output: %s', output) return CH_map def track_maps(tracked_maps, untracked_maps,",
"file that matches the file_pattern and has a good quality''' for file_path in",
"if the job ran succesfully if return_code != 0: raise JobError(return_code, output, error,",
"newly_tracked_maps # Start point of the script if __name__ == '__main__': # Get",
"classification_config_file, centersFile = classification_centers_file, output = segmented_map) logging.info('Running job\\n%s', job) # Run the",
"AIA_images = get_AIA_files(date, AIA_wavelengths) except FileNotFoundError as why: logging.warning('Missing AIA files for date",
"%s but have only %s', tracking_run_count, len(untracked_maps)) # Track the remaing untracked maps",
"tracking, need %s but have only %s', tracking_run_count, len(untracked_maps)) # Track the remaing",
"%s was not found' % date) else: return [file_path] def create_segmented_map(AIA_images, date): '''Run",
"AIA_quality import get_quality, get_quality_errors # Path to the classification program classification_exec = '/opt/spoca4rwc/SPoCA/bin/classification.x'",
"file classification_config_file = '/opt/spoca4rwc/scripts/AIA_CH_classification.config' # Path to the centers file classification_centers_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/centers.txt'",
"untracked_maps, newly_tracked_maps) return newly_tracked_maps # Start point of the script if __name__ ==",
"date) # Create the CH map CH_map = create_CH_map(segmented_map, date, AIA_images + HMI_images)",
"previously tracked CH maps') parser.add_argument('--untracked_maps', '-u', metavar = 'MAP', nargs='*', help = 'File",
"assumed all existing are if tracked_maps is None: tracked_maps = sorted(glob(os.path.join(maps_directory, '*.CHMap.fits'))) #",
"run the classification program classification_run_frequency = timedelta(hours = 4) # Path to the",
"map CH_map = create_CH_map(segmented_map, date, AIA_images + HMI_images) # Add the CH map",
"tracked_maps = list(filter(lambda tracked_map: not os.path.samefile(tracked_map, untracked_map), tracked_maps)) # We will return the",
"newly_tracked_maps = list() # Start the loop for date in date_range(start_date, end_date, classification_run_frequency):",
"# Directory to output the maps maps_directory = '/data/spoca/spoca4rwc/quicklook/CH_maps/' # Directory where the",
"if return_code != 0: raise JobError(return_code, output, error, job_name = 'get_CH_map', segmented_map =",
"= os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.SegmentedMap.fits') # Create a job for the classification program",
"the previous tracking last_color = parse_tracking_color_file(tracking_color_file) # Create a job for the tracking",
"logging.error('Could not write tracking color to file \"%s\": %s', tracking_color_file, why) return tracked_maps",
"if return_code != 0: raise JobError(return_code, output, error, job_name = 'tracking', maps =",
"os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.CHMap.fits') # Create a job for the get_CH_map program with",
"date = start.replace() while date < end: yield date date += step def",
"logging.basicConfig(level = logging.DEBUG if args.debug else logging.INFO, format='%(asctime)s : %(levelname)-8s : %(message)s', filename=args.log_file)",
"that matches the file_pattern and has a good quality''' for file_path in sorted(glob(file_pattern)):",
"CH_map = CH_map) else: logging.debug('Job ran without errors, output: %s', output) return CH_map",
"skipping missing files!', date) continue # Get the list of HMI images try:",
"program classification_run_frequency = timedelta(hours = 4) # Path to the get_CH_map program get_CH_map_exec",
"create CH_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') + '.CHMap.fits') # Create a job for the",
"HMI images try: HMI_images = get_HMI_files(date) except FileNotFoundError as why: # It's okay",
"wavelength in wavelengths: file_path = get_good_file(aia_file_pattern.format(date=date, wavelength=wavelength)) if file_path is None: raise FileNotFoundError('AIA",
"not find output file {segmented_map}', segmented_map = segmented_map) else: logging.debug('Job ran without errors,",
"program with the appropriate parameters job = Job(tracking_exec, *maps, config = tracking_config_file, maxDeltaT",
"point of the script if __name__ == '__main__': # Get the arguments parser",
"File path for the CH map to create CH_map = os.path.join(maps_directory, date.strftime('%Y%m%d_%H%M%S') +",
"the CH map to the list of untracked maps untracked_maps.append(CH_map) # If we",
"= list() # Create the Segmented map segmented_map = create_segmented_map(AIA_images, date) # Create",
"prepped HMI files are located hmi_file_pattern = '/data/SDO/public/HMI_quicklook/magnetogram/{date.year:04d}/{date.month:02d}/{date.day:02d}/HMI.{date.year:04d}{date.month:02d}{date.day:02d}_{date.hour:02d}*.*.fits' # Wavelengths of AIA data",
"file_paths def get_HMI_files(date): '''Return a list of HMI files for the specified date'''",
"tracking_config_file = '/opt/spoca4rwc/scripts/AIA_CH_tracking.config' # Path to the tracking color file tracking_color_file = '/data/spoca/spoca4rwc/quicklook/CH_maps/tracking_color.txt'",
"the tracking program on tracking_run_count = 3 # Directory to output the maps",
"the specified date''' file_path = get_good_file(hmi_file_pattern.format(date=date)) if file_path is None: raise FileNotFoundError('HMI file",
"0 else: logging.debug('Found last color %s from file %s', last_color, tracking_color_file) return last_color",
"program''' # File paths of the maps to run the tracking on maps",
"for the tracking program with the appropriate parameters job = Job(tracking_exec, *maps, config",
"= maps) else: logging.debug('Job ran without errors, output: %s', output) try: with open(tracking_color_file,",
"loop for date in date_range(start_date, end_date, classification_run_frequency): # Get the AIA files for",
"maps') parser.add_argument('--debug', '-d', default = False, action = 'store_true', help = 'Set the",
"as why: logging.warning('Could not read tracking color from file \"%s\": %s', tracking_color_file, why)",
"'store_true', help = 'Set the logging level to debug') parser.add_argument('--log_file', '-l', help =",
"need to remove the untracked maps from the tracked maps for untracked_map in",
"elif not os.path.exists(segmented_map): raise JobError(message = 'Could not find output file {segmented_map}', segmented_map",
"jobs to create and track the CHMaps''' # If no tracked maps were",
"= 'File paths of not yet tracked CH maps') args = parser.parse_args() #",
"The frequency to run the classification program classification_run_frequency = timedelta(hours = 4) #",
"raise JobError(message = 'Could not find output file {CH_map}', CH_map = CH_map) else:",
"output file {CH_map}', CH_map = CH_map) else: logging.debug('Job ran without errors, output: %s',",
"tracking job return_code, output, error = job() # Check if the job ran",
"Get the AIA files for the classification try: AIA_images = get_AIA_files(date, AIA_wavelengths) except",
"maps from the tracked maps for untracked_map in untracked_maps: tracked_maps = list(filter(lambda tracked_map:",
"create and track the CHMaps''' # If no tracked maps were given, we",
"date of AIA files, in form YYYY-MM-DD') parser.add_argument('--tracked_maps', '-t', metavar = 'MAP', nargs='*',"
] |
[
"op.add_column('user', sa.Column('name', sa.String(length=20), nullable=True)) op.add_column('user', sa.Column('surname', sa.String(length=20), nullable=True)) op.create_index(op.f('ix_user_surname'), 'user', ['surname'], unique=False) #",
"identifiers, used by Alembic. revision = 'd385c3eb6937' down_revision = 'ee2cbe4166fb' branch_labels = None",
"Date: 2018-02-16 11:23:29.705565 \"\"\" from alembic import op import sqlalchemy as sa #",
"nullable=True)) op.add_column('user', sa.Column('surname', sa.String(length=20), nullable=True)) op.create_index(op.f('ix_user_surname'), 'user', ['surname'], unique=False) # ### end Alembic",
"by Alembic. revision = 'd385c3eb6937' down_revision = 'ee2cbe4166fb' branch_labels = None depends_on =",
"= 'd385c3eb6937' down_revision = 'ee2cbe4166fb' branch_labels = None depends_on = None def upgrade():",
"Create Date: 2018-02-16 11:23:29.705565 \"\"\" from alembic import op import sqlalchemy as sa",
"as sa # revision identifiers, used by Alembic. revision = 'd385c3eb6937' down_revision =",
"# ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_user_surname'), table_name='user')",
"revision identifiers, used by Alembic. revision = 'd385c3eb6937' down_revision = 'ee2cbe4166fb' branch_labels =",
"revision = 'd385c3eb6937' down_revision = 'ee2cbe4166fb' branch_labels = None depends_on = None def",
"auto generated by Alembic - please adjust! ### op.add_column('user', sa.Column('name', sa.String(length=20), nullable=True)) op.add_column('user',",
"please adjust! ### op.add_column('user', sa.Column('name', sa.String(length=20), nullable=True)) op.add_column('user', sa.Column('surname', sa.String(length=20), nullable=True)) op.create_index(op.f('ix_user_surname'), 'user',",
"generated by Alembic - please adjust! ### op.drop_index(op.f('ix_user_surname'), table_name='user') op.drop_column('user', 'surname') op.drop_column('user', 'name')",
"sa.Column('surname', sa.String(length=20), nullable=True)) op.create_index(op.f('ix_user_surname'), 'user', ['surname'], unique=False) # ### end Alembic commands ###",
"alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic.",
"Revision ID: d385c3eb6937 Revises: ee2cbe4166fb Create Date: 2018-02-16 11:23:29.705565 \"\"\" from alembic import",
"sa.String(length=20), nullable=True)) op.create_index(op.f('ix_user_surname'), 'user', ['surname'], unique=False) # ### end Alembic commands ### def",
"ID: d385c3eb6937 Revises: ee2cbe4166fb Create Date: 2018-02-16 11:23:29.705565 \"\"\" from alembic import op",
"2018-02-16 11:23:29.705565 \"\"\" from alembic import op import sqlalchemy as sa # revision",
"### commands auto generated by Alembic - please adjust! ### op.add_column('user', sa.Column('name', sa.String(length=20),",
"['surname'], unique=False) # ### end Alembic commands ### def downgrade(): # ### commands",
"set to PK Revision ID: d385c3eb6937 Revises: ee2cbe4166fb Create Date: 2018-02-16 11:23:29.705565 \"\"\"",
"auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_user_surname'), table_name='user') op.drop_column('user', 'surname') op.drop_column('user',",
"op import sqlalchemy as sa # revision identifiers, used by Alembic. revision =",
"upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('user',",
"def downgrade(): # ### commands auto generated by Alembic - please adjust! ###",
"used by Alembic. revision = 'd385c3eb6937' down_revision = 'ee2cbe4166fb' branch_labels = None depends_on",
"Alembic - please adjust! ### op.add_column('user', sa.Column('name', sa.String(length=20), nullable=True)) op.add_column('user', sa.Column('surname', sa.String(length=20), nullable=True))",
"downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_user_surname'),",
"please adjust! ### op.drop_index(op.f('ix_user_surname'), table_name='user') op.drop_column('user', 'surname') op.drop_column('user', 'name') # ### end Alembic",
"adjust! ### op.add_column('user', sa.Column('name', sa.String(length=20), nullable=True)) op.add_column('user', sa.Column('surname', sa.String(length=20), nullable=True)) op.create_index(op.f('ix_user_surname'), 'user', ['surname'],",
"op.create_index(op.f('ix_user_surname'), 'user', ['surname'], unique=False) # ### end Alembic commands ### def downgrade(): #",
"\"\"\"USERS - Autoincrement set to PK Revision ID: d385c3eb6937 Revises: ee2cbe4166fb Create Date:",
"Autoincrement set to PK Revision ID: d385c3eb6937 Revises: ee2cbe4166fb Create Date: 2018-02-16 11:23:29.705565",
"import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision",
"down_revision = 'ee2cbe4166fb' branch_labels = None depends_on = None def upgrade(): # ###",
"### def downgrade(): # ### commands auto generated by Alembic - please adjust!",
"None depends_on = None def upgrade(): # ### commands auto generated by Alembic",
"by Alembic - please adjust! ### op.add_column('user', sa.Column('name', sa.String(length=20), nullable=True)) op.add_column('user', sa.Column('surname', sa.String(length=20),",
"generated by Alembic - please adjust! ### op.add_column('user', sa.Column('name', sa.String(length=20), nullable=True)) op.add_column('user', sa.Column('surname',",
"sa.String(length=20), nullable=True)) op.add_column('user', sa.Column('surname', sa.String(length=20), nullable=True)) op.create_index(op.f('ix_user_surname'), 'user', ['surname'], unique=False) # ### end",
"sa # revision identifiers, used by Alembic. revision = 'd385c3eb6937' down_revision = 'ee2cbe4166fb'",
"- Autoincrement set to PK Revision ID: d385c3eb6937 Revises: ee2cbe4166fb Create Date: 2018-02-16",
"commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_user_surname'), table_name='user') op.drop_column('user', 'surname')",
"- please adjust! ### op.drop_index(op.f('ix_user_surname'), table_name='user') op.drop_column('user', 'surname') op.drop_column('user', 'name') # ### end",
"PK Revision ID: d385c3eb6937 Revises: ee2cbe4166fb Create Date: 2018-02-16 11:23:29.705565 \"\"\" from alembic",
"Alembic commands ### def downgrade(): # ### commands auto generated by Alembic -",
"branch_labels = None depends_on = None def upgrade(): # ### commands auto generated",
"\"\"\" from alembic import op import sqlalchemy as sa # revision identifiers, used",
"Alembic - please adjust! ### op.drop_index(op.f('ix_user_surname'), table_name='user') op.drop_column('user', 'surname') op.drop_column('user', 'name') # ###",
"### op.drop_index(op.f('ix_user_surname'), table_name='user') op.drop_column('user', 'surname') op.drop_column('user', 'name') # ### end Alembic commands ###",
"end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic",
"by Alembic - please adjust! ### op.drop_index(op.f('ix_user_surname'), table_name='user') op.drop_column('user', 'surname') op.drop_column('user', 'name') #",
"'d385c3eb6937' down_revision = 'ee2cbe4166fb' branch_labels = None depends_on = None def upgrade(): #",
"import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'd385c3eb6937'",
"nullable=True)) op.create_index(op.f('ix_user_surname'), 'user', ['surname'], unique=False) # ### end Alembic commands ### def downgrade():",
"def upgrade(): # ### commands auto generated by Alembic - please adjust! ###",
"commands auto generated by Alembic - please adjust! ### op.add_column('user', sa.Column('name', sa.String(length=20), nullable=True))",
"commands ### def downgrade(): # ### commands auto generated by Alembic - please",
"= None depends_on = None def upgrade(): # ### commands auto generated by",
"# revision identifiers, used by Alembic. revision = 'd385c3eb6937' down_revision = 'ee2cbe4166fb' branch_labels",
"'user', ['surname'], unique=False) # ### end Alembic commands ### def downgrade(): # ###",
"sa.Column('name', sa.String(length=20), nullable=True)) op.add_column('user', sa.Column('surname', sa.String(length=20), nullable=True)) op.create_index(op.f('ix_user_surname'), 'user', ['surname'], unique=False) # ###",
"adjust! ### op.drop_index(op.f('ix_user_surname'), table_name='user') op.drop_column('user', 'surname') op.drop_column('user', 'name') # ### end Alembic commands",
"= None def upgrade(): # ### commands auto generated by Alembic - please",
"unique=False) # ### end Alembic commands ### def downgrade(): # ### commands auto",
"op.add_column('user', sa.Column('surname', sa.String(length=20), nullable=True)) op.create_index(op.f('ix_user_surname'), 'user', ['surname'], unique=False) # ### end Alembic commands",
"sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'd385c3eb6937' down_revision",
"Revises: ee2cbe4166fb Create Date: 2018-02-16 11:23:29.705565 \"\"\" from alembic import op import sqlalchemy",
"### op.add_column('user', sa.Column('name', sa.String(length=20), nullable=True)) op.add_column('user', sa.Column('surname', sa.String(length=20), nullable=True)) op.create_index(op.f('ix_user_surname'), 'user', ['surname'], unique=False)",
"None def upgrade(): # ### commands auto generated by Alembic - please adjust!",
"depends_on = None def upgrade(): # ### commands auto generated by Alembic -",
"# ### end Alembic commands ### def downgrade(): # ### commands auto generated",
"### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f('ix_user_surname'), table_name='user') op.drop_column('user',",
"# ### commands auto generated by Alembic - please adjust! ### op.add_column('user', sa.Column('name',",
"d385c3eb6937 Revises: ee2cbe4166fb Create Date: 2018-02-16 11:23:29.705565 \"\"\" from alembic import op import",
"from alembic import op import sqlalchemy as sa # revision identifiers, used by",
"ee2cbe4166fb Create Date: 2018-02-16 11:23:29.705565 \"\"\" from alembic import op import sqlalchemy as",
"### end Alembic commands ### def downgrade(): # ### commands auto generated by",
"= 'ee2cbe4166fb' branch_labels = None depends_on = None def upgrade(): # ### commands",
"Alembic. revision = 'd385c3eb6937' down_revision = 'ee2cbe4166fb' branch_labels = None depends_on = None",
"'ee2cbe4166fb' branch_labels = None depends_on = None def upgrade(): # ### commands auto",
"to PK Revision ID: d385c3eb6937 Revises: ee2cbe4166fb Create Date: 2018-02-16 11:23:29.705565 \"\"\" from",
"- please adjust! ### op.add_column('user', sa.Column('name', sa.String(length=20), nullable=True)) op.add_column('user', sa.Column('surname', sa.String(length=20), nullable=True)) op.create_index(op.f('ix_user_surname'),",
"11:23:29.705565 \"\"\" from alembic import op import sqlalchemy as sa # revision identifiers,"
] |
[
"لوباء كوفيد-19\", tweet.full_text) or re.search( r\"حصيلة وباء كورونا كوفيد-19 ليوم\", tweet.full_text) or re.search(",
"if(new_deaths_line): new_deaths = int( re.search(r'\\d+', new_deaths_line[0]).group(0)) else: if(re.findall( \"^.*وفاة واحدة.*$\", tweet.full_text, re.MULTILINE)[0]): new_deaths",
"= int( re.search(r'\\d+', new_deaths_line[0]).group(0)) else: if(re.findall( \"^.*وفاة واحدة.*$\", tweet.full_text, re.MULTILINE)[0]): new_deaths = 1",
"exclude_replies=False, ).items(num_tweets) return tweets class TwitterCollectorBase: def __init__(self, api, username: str, location: str,",
"or re.search( r\"نوافيكم بالحصيلة الكاملة\", tweet.full_text) match2 = re.search(r\"العناية المركز\", tweet.full_text) if match",
"TwitterAPI: def __init__(self, consumer_key: str, consumer_secret: str): self._api = self._get_api(consumer_key, consumer_secret) def _get_api(self,",
") return df def build_post_url(self, tweet_id: str): return f\"https://twitter.com/{self.username}/status/{tweet_id}\" def merge_with_current_data(self, df: pd.DataFrame)",
".sort_values(\"date\") ) return df def build_post_url(self, tweet_id: str): return f\"https://twitter.com/{self.username}/status/{tweet_id}\" def merge_with_current_data(self, df:",
"\"10\", 'كتوبر': \"10\", 'نوفمبر': \"11\", 'ديسمبر': \"12\" }[month] def main(): api = TwitterAPI(TWITTER_CONSUMER_KEY,",
"os import pandas as pd import re import tweepy try: from config import",
"_set_output_path(self, paths, output_path): if output_path is None: if paths is not None: return",
"\"./algeria-covid19-icu-data.csv\" self._data_old = self._get_current_data() def _set_output_path(self, paths, output_path): if output_path is None: if",
"output_path): if output_path is None: if paths is not None: return paths.tmp_vax_out_proposal(self.location) else:",
"raise NotImplementedError def propose_df(self): df = ( self._propose_df() .pipe(self.merge_with_current_data) .sort_values(\"date\") ) return df",
"came from the COVID-19 Dataset project by Our World in Data # https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter",
"= self.propose_df() df.to_csv(self.output_path, index=False) class Algeria(TwitterCollectorBase): def __init__(self, api, **kwargs): super().__init__( api=api, username=\"Sante_Gouv_dz\",",
"dt_match = re.search( r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\", tweet.full_text) dt = dt_match.group( 3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2) if self.stop_search(dt): break new_cases_line",
"واحدة.*$\", tweet.full_text, re.MULTILINE)[0]): new_deaths = 1 data.append({ \"date\": dt, \"new_cases\": new_cases, \"recoveries\": recoveries,",
"or re.search( r\"حصيلة وباء كورونا كوفيد-19 ليوم\", tweet.full_text) or re.search( r\"نوافيكم بالحصيلة الكاملة\",",
"self.location = location self.tweets = api.get_tweets(self.username, num_tweets) self.tweets_relevant = [] self.output_path = \"./algeria-covid19-icu-data.csv\"",
"if output_path is None: if paths is not None: return paths.tmp_vax_out_proposal(self.location) else: raise",
"self._propose_df() .pipe(self.merge_with_current_data) .sort_values(\"date\") ) return df def build_post_url(self, tweet_id: str): return f\"https://twitter.com/{self.username}/status/{tweet_id}\" def",
"re.findall( \"^.*وفيات.*$\", tweet.full_text, re.MULTILINE) if(new_deaths_line): new_deaths = int( re.search(r'\\d+', new_deaths_line[0]).group(0)) else: if(re.findall( \"^.*وفاة",
"df = pd.concat([df, df_current]).sort_values(by=\"date\") return df def stop_search(self, dt): if self._data_old is None:",
"paths is not None: return paths.tmp_vax_out_proposal(self.location) else: raise AttributeError( \"Either specify attribute `paths`",
"is None: return False elif dt >= self.last_update: return False elif dt <",
"self._data_old[~self._data_old.date.isin(df.date)] df = pd.concat([df, df_current]).sort_values(by=\"date\") return df def stop_search(self, dt): if self._data_old is",
"re.findall( \"^.*للشفاء.*$\", tweet.full_text, re.MULTILINE)[0] recoveries = int(re.search(r'\\d+', recoveries_line).group(0)) in_icu_line = re.findall( \"^.*العناية المركز.*$\",",
"'جويلية': \"07\", 'اوت': \"08\", 'أوت': \"08\", 'سبتمبر': \"09\", 'أكتوبر': \"10\", 'اكتوبر': \"10\", 'كتوبر':",
"portion of the code came from the COVID-19 Dataset project by Our World",
"arabicMonthToNum(month): return { 'جانفي': \"01\", 'فيفري': \"02\", 'مارس': \"03\", 'أفريل': \"04\", 'ماي': \"05\",",
"num_tweets=30): tweets = tweepy.Cursor(self._api.user_timeline, screen_name=username, include_rts=False, tweet_mode='extended', exclude_replies=False, ).items(num_tweets) return tweets class TwitterCollectorBase:",
"}) df = pd.DataFrame(data) return df def arabicMonthToNum(month): return { 'جانفي': \"01\", 'فيفري':",
"new_deaths = 1 data.append({ \"date\": dt, \"new_cases\": new_cases, \"recoveries\": recoveries, \"in_icu\": in_icu, \"death\":",
"tweet_mode='extended', exclude_replies=False, ).items(num_tweets) return tweets class TwitterCollectorBase: def __init__(self, api, username: str, location:",
"self._data_old is None: return False elif dt >= self.last_update: return False elif dt",
"self._data_old.date.max() else: return None def _propose_df(self): raise NotImplementedError def propose_df(self): df = (",
"new_cases_line = re.findall( \"^.*جديدة.*$\", tweet.full_text, re.MULTILINE)[0] new_cases = int(re.search(r'\\d+', new_cases_line).group(0)) recoveries_line = re.findall(",
"if(re.findall( \"^.*وفاة واحدة.*$\", tweet.full_text, re.MULTILINE)[0]): new_deaths = 1 data.append({ \"date\": dt, \"new_cases\": new_cases,",
"TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY') TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET') class TwitterAPI: def __init__(self, consumer_key: str, consumer_secret:",
"Mainly contributed by <NAME> https://github.com/lucasrodes # The code is under completely open access",
"in Data # https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter # Mainly contributed by <NAME> https://github.com/lucasrodes # The code",
"_get_api(self, consumer_key, consumer_secret): auth = tweepy.AppAuthHandler(consumer_key, consumer_secret) return tweepy.API(auth) def get_tweets(self, username, num_tweets=30):",
".pipe(self.merge_with_current_data) .sort_values(\"date\") ) return df def build_post_url(self, tweet_id: str): return f\"https://twitter.com/{self.username}/status/{tweet_id}\" def merge_with_current_data(self,",
"< self.last_update: return True def to_csv(self): df = self.propose_df() df.to_csv(self.output_path, index=False) class Algeria(TwitterCollectorBase):",
"df = pd.DataFrame(data) return df def arabicMonthToNum(month): return { 'جانفي': \"01\", 'فيفري': \"02\",",
"is under completely open access under the Creative Commons BY license # https://creativecommons.org/licenses/by/4.0/",
"def propose_df(self): df = ( self._propose_df() .pipe(self.merge_with_current_data) .sort_values(\"date\") ) return df def build_post_url(self,",
"from config import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET except ImportError: TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY') TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET')",
"return tweets class TwitterCollectorBase: def __init__(self, api, username: str, location: str, num_tweets=100): self.username",
"'اكتوبر': \"10\", 'كتوبر': \"10\", 'نوفمبر': \"11\", 'ديسمبر': \"12\" }[month] def main(): api =",
"\"^.*جديدة.*$\", tweet.full_text, re.MULTILINE)[0] new_cases = int(re.search(r'\\d+', new_cases_line).group(0)) recoveries_line = re.findall( \"^.*للشفاء.*$\", tweet.full_text, re.MULTILINE)[0]",
"\"03\", 'أفريل': \"04\", 'ماي': \"05\", 'جوان': \"06\", 'جويلية': \"07\", 'اوت': \"08\", 'أوت': \"08\",",
"by <NAME> https://github.com/lucasrodes # The code is under completely open access under the",
"dt_match.group( 3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2) if self.stop_search(dt): break new_cases_line = re.findall( \"^.*جديدة.*$\", tweet.full_text, re.MULTILINE)[0] new_cases =",
"large portion of the code came from the COVID-19 Dataset project by Our",
"# The code is under completely open access under the Creative Commons BY",
"re.search(r\"مؤشرات الترصد لوباء كوفيد-19\", tweet.full_text) or re.search( r\"حصيلة وباء كورونا كوفيد-19 ليوم\", tweet.full_text)",
"self._api = self._get_api(consumer_key, consumer_secret) def _get_api(self, consumer_key, consumer_secret): auth = tweepy.AppAuthHandler(consumer_key, consumer_secret) return",
"or method argument `output_path`\") def _get_current_data(self): if os.path.isfile(self.output_path): return pd.read_csv(self.output_path) else: None @property",
"new_cases = int(re.search(r'\\d+', new_cases_line).group(0)) recoveries_line = re.findall( \"^.*للشفاء.*$\", tweet.full_text, re.MULTILINE)[0] recoveries = int(re.search(r'\\d+',",
"re.search(r'\\d+', new_deaths_line[0]).group(0)) else: if(re.findall( \"^.*وفاة واحدة.*$\", tweet.full_text, re.MULTILINE)[0]): new_deaths = 1 data.append({ \"date\":",
"self.build_post_url(tweet.id), }) df = pd.DataFrame(data) return df def arabicMonthToNum(month): return { 'جانفي': \"01\",",
"tweets class TwitterCollectorBase: def __init__(self, api, username: str, location: str, num_tweets=100): self.username =",
"**kwargs ) def _propose_df(self): data = [] for tweet in self.tweets: match =",
"return self._data_old if self._data_old is not None: df_current = self._data_old[~self._data_old.date.isin(df.date)] df = pd.concat([df,",
"( self._propose_df() .pipe(self.merge_with_current_data) .sort_values(\"date\") ) return df def build_post_url(self, tweet_id: str): return f\"https://twitter.com/{self.username}/status/{tweet_id}\"",
"tweet.full_text) match2 = re.search(r\"العناية المركز\", tweet.full_text) if match and match2: dt_match = re.search(",
"self.tweets = api.get_tweets(self.username, num_tweets) self.tweets_relevant = [] self.output_path = \"./algeria-covid19-icu-data.csv\" self._data_old = self._get_current_data()",
"https://creativecommons.org/licenses/by/4.0/ import os import pandas as pd import re import tweepy try: from",
"= os.getenv('TWITTER_CONSUMER_KEY') TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET') class TwitterAPI: def __init__(self, consumer_key: str, consumer_secret: str):",
"if df.empty: return self._data_old if self._data_old is not None: df_current = self._data_old[~self._data_old.date.isin(df.date)] df",
"`paths` or method argument `output_path`\") def _get_current_data(self): if os.path.isfile(self.output_path): return pd.read_csv(self.output_path) else: None",
"pd import re import tweepy try: from config import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET except ImportError:",
"= pd.concat([df, df_current]).sort_values(by=\"date\") return df def stop_search(self, dt): if self._data_old is None: return",
"stop_search(self, dt): if self._data_old is None: return False elif dt >= self.last_update: return",
"super().__init__( api=api, username=\"Sante_Gouv_dz\", location=\"Algeria\", **kwargs ) def _propose_df(self): data = [] for tweet",
"\"10\", 'نوفمبر': \"11\", 'ديسمبر': \"12\" }[month] def main(): api = TwitterAPI(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET) Algeria(api).to_csv()",
"dt >= self.last_update: return False elif dt < self.last_update: return True def to_csv(self):",
"tweet.full_text, \"source_url\": self.build_post_url(tweet.id), }) df = pd.DataFrame(data) return df def arabicMonthToNum(month): return {",
"= ( self._propose_df() .pipe(self.merge_with_current_data) .sort_values(\"date\") ) return df def build_post_url(self, tweet_id: str): return",
"tweepy.Cursor(self._api.user_timeline, screen_name=username, include_rts=False, tweet_mode='extended', exclude_replies=False, ).items(num_tweets) return tweets class TwitterCollectorBase: def __init__(self, api,",
"'جوان': \"06\", 'جويلية': \"07\", 'اوت': \"08\", 'أوت': \"08\", 'سبتمبر': \"09\", 'أكتوبر': \"10\", 'اكتوبر':",
"pd.DataFrame) -> pd.DataFrame: if df.empty: return self._data_old if self._data_old is not None: df_current",
"= re.findall( \"^.*وفيات.*$\", tweet.full_text, re.MULTILINE) if(new_deaths_line): new_deaths = int( re.search(r'\\d+', new_deaths_line[0]).group(0)) else: if(re.findall(",
"\"08\", 'أوت': \"08\", 'سبتمبر': \"09\", 'أكتوبر': \"10\", 'اكتوبر': \"10\", 'كتوبر': \"10\", 'نوفمبر': \"11\",",
"df_current]).sort_values(by=\"date\") return df def stop_search(self, dt): if self._data_old is None: return False elif",
"match2: dt_match = re.search( r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\", tweet.full_text) dt = dt_match.group( 3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2) if self.stop_search(dt): break",
"tweet_id: str): return f\"https://twitter.com/{self.username}/status/{tweet_id}\" def merge_with_current_data(self, df: pd.DataFrame) -> pd.DataFrame: if df.empty: return",
"[] self.output_path = \"./algeria-covid19-icu-data.csv\" self._data_old = self._get_current_data() def _set_output_path(self, paths, output_path): if output_path",
"= int(re.search(r'\\d+', recoveries_line).group(0)) in_icu_line = re.findall( \"^.*العناية المركز.*$\", tweet.full_text, re.MULTILINE)[0] in_icu = int(re.search(r'\\d+',",
"\"^.*للشفاء.*$\", tweet.full_text, re.MULTILINE)[0] recoveries = int(re.search(r'\\d+', recoveries_line).group(0)) in_icu_line = re.findall( \"^.*العناية المركز.*$\", tweet.full_text,",
"= re.findall( \"^.*للشفاء.*$\", tweet.full_text, re.MULTILINE)[0] recoveries = int(re.search(r'\\d+', recoveries_line).group(0)) in_icu_line = re.findall( \"^.*العناية",
"df_current = self._data_old[~self._data_old.date.isin(df.date)] df = pd.concat([df, df_current]).sort_values(by=\"date\") return df def stop_search(self, dt): if",
"r\"نوافيكم بالحصيلة الكاملة\", tweet.full_text) match2 = re.search(r\"العناية المركز\", tweet.full_text) if match and match2:",
"TwitterCollectorBase: def __init__(self, api, username: str, location: str, num_tweets=100): self.username = username self.location",
"if self.stop_search(dt): break new_cases_line = re.findall( \"^.*جديدة.*$\", tweet.full_text, re.MULTILINE)[0] new_cases = int(re.search(r'\\d+', new_cases_line).group(0))",
"World in Data # https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter # Mainly contributed by <NAME> https://github.com/lucasrodes # The",
"def get_tweets(self, username, num_tweets=30): tweets = tweepy.Cursor(self._api.user_timeline, screen_name=username, include_rts=False, tweet_mode='extended', exclude_replies=False, ).items(num_tweets) return",
"المركز\", tweet.full_text) if match and match2: dt_match = re.search( r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\", tweet.full_text) dt =",
"def __init__(self, consumer_key: str, consumer_secret: str): self._api = self._get_api(consumer_key, consumer_secret) def _get_api(self, consumer_key,",
"الكاملة\", tweet.full_text) match2 = re.search(r\"العناية المركز\", tweet.full_text) if match and match2: dt_match =",
"elif dt < self.last_update: return True def to_csv(self): df = self.propose_df() df.to_csv(self.output_path, index=False)",
"not None: return self._data_old.date.max() else: return None def _propose_df(self): raise NotImplementedError def propose_df(self):",
"tweet.full_text, re.MULTILINE)[0] new_cases = int(re.search(r'\\d+', new_cases_line).group(0)) recoveries_line = re.findall( \"^.*للشفاء.*$\", tweet.full_text, re.MULTILINE)[0] recoveries",
"open access under the Creative Commons BY license # https://creativecommons.org/licenses/by/4.0/ import os import",
"self.output_path = \"./algeria-covid19-icu-data.csv\" self._data_old = self._get_current_data() def _set_output_path(self, paths, output_path): if output_path is",
"tweepy.API(auth) def get_tweets(self, username, num_tweets=30): tweets = tweepy.Cursor(self._api.user_timeline, screen_name=username, include_rts=False, tweet_mode='extended', exclude_replies=False, ).items(num_tweets)",
"re.MULTILINE)[0]): new_deaths = 1 data.append({ \"date\": dt, \"new_cases\": new_cases, \"recoveries\": recoveries, \"in_icu\": in_icu,",
"= int(re.search(r'\\d+', new_cases_line).group(0)) recoveries_line = re.findall( \"^.*للشفاء.*$\", tweet.full_text, re.MULTILINE)[0] recoveries = int(re.search(r'\\d+', recoveries_line).group(0))",
"consumer_secret: str): self._api = self._get_api(consumer_key, consumer_secret) def _get_api(self, consumer_key, consumer_secret): auth = tweepy.AppAuthHandler(consumer_key,",
"else: if(re.findall( \"^.*وفاة واحدة.*$\", tweet.full_text, re.MULTILINE)[0]): new_deaths = 1 data.append({ \"date\": dt, \"new_cases\":",
"under completely open access under the Creative Commons BY license # https://creativecommons.org/licenses/by/4.0/ import",
"\"06\", 'جويلية': \"07\", 'اوت': \"08\", 'أوت': \"08\", 'سبتمبر': \"09\", 'أكتوبر': \"10\", 'اكتوبر': \"10\",",
"= re.search( r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\", tweet.full_text) dt = dt_match.group( 3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2) if self.stop_search(dt): break new_cases_line =",
"import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET except ImportError: TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY') TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET') class TwitterAPI:",
"1 data.append({ \"date\": dt, \"new_cases\": new_cases, \"recoveries\": recoveries, \"in_icu\": in_icu, \"death\": new_deaths, \"text\":",
"dt = dt_match.group( 3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2) if self.stop_search(dt): break new_cases_line = re.findall( \"^.*جديدة.*$\", tweet.full_text, re.MULTILINE)[0]",
"include_rts=False, tweet_mode='extended', exclude_replies=False, ).items(num_tweets) return tweets class TwitterCollectorBase: def __init__(self, api, username: str,",
"consumer_key, consumer_secret): auth = tweepy.AppAuthHandler(consumer_key, consumer_secret) return tweepy.API(auth) def get_tweets(self, username, num_tweets=30): tweets",
"= [] self.output_path = \"./algeria-covid19-icu-data.csv\" self._data_old = self._get_current_data() def _set_output_path(self, paths, output_path): if",
"tweet.full_text) or re.search( r\"حصيلة وباء كورونا كوفيد-19 ليوم\", tweet.full_text) or re.search( r\"نوافيكم بالحصيلة",
"re.search( r\"حصيلة وباء كورونا كوفيد-19 ليوم\", tweet.full_text) or re.search( r\"نوافيكم بالحصيلة الكاملة\", tweet.full_text)",
"self._get_current_data() def _set_output_path(self, paths, output_path): if output_path is None: if paths is not",
"= \"./algeria-covid19-icu-data.csv\" self._data_old = self._get_current_data() def _set_output_path(self, paths, output_path): if output_path is None:",
"# Mainly contributed by <NAME> https://github.com/lucasrodes # The code is under completely open",
"\"12\" }[month] def main(): api = TwitterAPI(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET) Algeria(api).to_csv() if __name__ == \"__main__\":",
"tweet in self.tweets: match = re.search(r\"مؤشرات الترصد لوباء كوفيد-19\", tweet.full_text) or re.search( r\"حصيلة",
"r\"حصيلة وباء كورونا كوفيد-19 ليوم\", tweet.full_text) or re.search( r\"نوافيكم بالحصيلة الكاملة\", tweet.full_text) match2",
"ليوم\", tweet.full_text) or re.search( r\"نوافيكم بالحصيلة الكاملة\", tweet.full_text) match2 = re.search(r\"العناية المركز\", tweet.full_text)",
"tweepy try: from config import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET except ImportError: TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY') TWITTER_CONSUMER_SECRET",
"num_tweets=100): self.username = username self.location = location self.tweets = api.get_tweets(self.username, num_tweets) self.tweets_relevant =",
"return tweepy.API(auth) def get_tweets(self, username, num_tweets=30): tweets = tweepy.Cursor(self._api.user_timeline, screen_name=username, include_rts=False, tweet_mode='extended', exclude_replies=False,",
"df = ( self._propose_df() .pipe(self.merge_with_current_data) .sort_values(\"date\") ) return df def build_post_url(self, tweet_id: str):",
"بالحصيلة الكاملة\", tweet.full_text) match2 = re.search(r\"العناية المركز\", tweet.full_text) if match and match2: dt_match",
"__init__(self, api, **kwargs): super().__init__( api=api, username=\"Sante_Gouv_dz\", location=\"Algeria\", **kwargs ) def _propose_df(self): data =",
"and match2: dt_match = re.search( r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\", tweet.full_text) dt = dt_match.group( 3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2) if self.stop_search(dt):",
"not None: return paths.tmp_vax_out_proposal(self.location) else: raise AttributeError( \"Either specify attribute `paths` or method",
"\"^.*العناية المركز.*$\", tweet.full_text, re.MULTILINE)[0] in_icu = int(re.search(r'\\d+', in_icu_line).group(0)) new_deaths_line = re.findall( \"^.*وفيات.*$\", tweet.full_text,",
"is None: if paths is not None: return paths.tmp_vax_out_proposal(self.location) else: raise AttributeError( \"Either",
"= re.search(r\"مؤشرات الترصد لوباء كوفيد-19\", tweet.full_text) or re.search( r\"حصيلة وباء كورونا كوفيد-19 ليوم\",",
"\"11\", 'ديسمبر': \"12\" }[month] def main(): api = TwitterAPI(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET) Algeria(api).to_csv() if __name__",
"\"recoveries\": recoveries, \"in_icu\": in_icu, \"death\": new_deaths, \"text\": tweet.full_text, \"source_url\": self.build_post_url(tweet.id), }) df =",
"int( re.search(r'\\d+', new_deaths_line[0]).group(0)) else: if(re.findall( \"^.*وفاة واحدة.*$\", tweet.full_text, re.MULTILINE)[0]): new_deaths = 1 data.append({",
"None @property def last_update(self): if self._data_old is not None: return self._data_old.date.max() else: return",
"'اوت': \"08\", 'أوت': \"08\", 'سبتمبر': \"09\", 'أكتوبر': \"10\", 'اكتوبر': \"10\", 'كتوبر': \"10\", 'نوفمبر':",
"self.last_update: return True def to_csv(self): df = self.propose_df() df.to_csv(self.output_path, index=False) class Algeria(TwitterCollectorBase): def",
"_propose_df(self): data = [] for tweet in self.tweets: match = re.search(r\"مؤشرات الترصد لوباء",
"'جانفي': \"01\", 'فيفري': \"02\", 'مارس': \"03\", 'أفريل': \"04\", 'ماي': \"05\", 'جوان': \"06\", 'جويلية':",
"كوفيد-19 ليوم\", tweet.full_text) or re.search( r\"نوافيكم بالحصيلة الكاملة\", tweet.full_text) match2 = re.search(r\"العناية المركز\",",
"consumer_secret) def _get_api(self, consumer_key, consumer_secret): auth = tweepy.AppAuthHandler(consumer_key, consumer_secret) return tweepy.API(auth) def get_tweets(self,",
"\"01\", 'فيفري': \"02\", 'مارس': \"03\", 'أفريل': \"04\", 'ماي': \"05\", 'جوان': \"06\", 'جويلية': \"07\",",
"return df def stop_search(self, dt): if self._data_old is None: return False elif dt",
"\"10\", 'اكتوبر': \"10\", 'كتوبر': \"10\", 'نوفمبر': \"11\", 'ديسمبر': \"12\" }[month] def main(): api",
"location self.tweets = api.get_tweets(self.username, num_tweets) self.tweets_relevant = [] self.output_path = \"./algeria-covid19-icu-data.csv\" self._data_old =",
"_get_current_data(self): if os.path.isfile(self.output_path): return pd.read_csv(self.output_path) else: None @property def last_update(self): if self._data_old is",
"self._data_old = self._get_current_data() def _set_output_path(self, paths, output_path): if output_path is None: if paths",
"auth = tweepy.AppAuthHandler(consumer_key, consumer_secret) return tweepy.API(auth) def get_tweets(self, username, num_tweets=30): tweets = tweepy.Cursor(self._api.user_timeline,",
"def _set_output_path(self, paths, output_path): if output_path is None: if paths is not None:",
"\"source_url\": self.build_post_url(tweet.id), }) df = pd.DataFrame(data) return df def arabicMonthToNum(month): return { 'جانفي':",
"paths, output_path): if output_path is None: if paths is not None: return paths.tmp_vax_out_proposal(self.location)",
"recoveries, \"in_icu\": in_icu, \"death\": new_deaths, \"text\": tweet.full_text, \"source_url\": self.build_post_url(tweet.id), }) df = pd.DataFrame(data)",
"\"08\", 'سبتمبر': \"09\", 'أكتوبر': \"10\", 'اكتوبر': \"10\", 'كتوبر': \"10\", 'نوفمبر': \"11\", 'ديسمبر': \"12\"",
"int(re.search(r'\\d+', new_cases_line).group(0)) recoveries_line = re.findall( \"^.*للشفاء.*$\", tweet.full_text, re.MULTILINE)[0] recoveries = int(re.search(r'\\d+', recoveries_line).group(0)) in_icu_line",
"Our World in Data # https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter # Mainly contributed by <NAME> https://github.com/lucasrodes #",
"int(re.search(r'\\d+', recoveries_line).group(0)) in_icu_line = re.findall( \"^.*العناية المركز.*$\", tweet.full_text, re.MULTILINE)[0] in_icu = int(re.search(r'\\d+', in_icu_line).group(0))",
"# A large portion of the code came from the COVID-19 Dataset project",
"completely open access under the Creative Commons BY license # https://creativecommons.org/licenses/by/4.0/ import os",
"self._data_old is not None: return self._data_old.date.max() else: return None def _propose_df(self): raise NotImplementedError",
"None: if paths is not None: return paths.tmp_vax_out_proposal(self.location) else: raise AttributeError( \"Either specify",
"@property def last_update(self): if self._data_old is not None: return self._data_old.date.max() else: return None",
"license # https://creativecommons.org/licenses/by/4.0/ import os import pandas as pd import re import tweepy",
"'كتوبر': \"10\", 'نوفمبر': \"11\", 'ديسمبر': \"12\" }[month] def main(): api = TwitterAPI(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)",
"# https://creativecommons.org/licenses/by/4.0/ import os import pandas as pd import re import tweepy try:",
"code came from the COVID-19 Dataset project by Our World in Data #",
"merge_with_current_data(self, df: pd.DataFrame) -> pd.DataFrame: if df.empty: return self._data_old if self._data_old is not",
"return { 'جانفي': \"01\", 'فيفري': \"02\", 'مارس': \"03\", 'أفريل': \"04\", 'ماي': \"05\", 'جوان':",
"كورونا كوفيد-19 ليوم\", tweet.full_text) or re.search( r\"نوافيكم بالحصيلة الكاملة\", tweet.full_text) match2 = re.search(r\"العناية",
"[] for tweet in self.tweets: match = re.search(r\"مؤشرات الترصد لوباء كوفيد-19\", tweet.full_text) or",
"under the Creative Commons BY license # https://creativecommons.org/licenses/by/4.0/ import os import pandas as",
"\"07\", 'اوت': \"08\", 'أوت': \"08\", 'سبتمبر': \"09\", 'أكتوبر': \"10\", 'اكتوبر': \"10\", 'كتوبر': \"10\",",
"= tweepy.Cursor(self._api.user_timeline, screen_name=username, include_rts=False, tweet_mode='extended', exclude_replies=False, ).items(num_tweets) return tweets class TwitterCollectorBase: def __init__(self,",
"re import tweepy try: from config import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET except ImportError: TWITTER_CONSUMER_KEY =",
"Data # https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter # Mainly contributed by <NAME> https://github.com/lucasrodes # The code is",
"else: None @property def last_update(self): if self._data_old is not None: return self._data_old.date.max() else:",
"api, username: str, location: str, num_tweets=100): self.username = username self.location = location self.tweets",
"return pd.read_csv(self.output_path) else: None @property def last_update(self): if self._data_old is not None: return",
"os.getenv('TWITTER_CONSUMER_SECRET') class TwitterAPI: def __init__(self, consumer_key: str, consumer_secret: str): self._api = self._get_api(consumer_key, consumer_secret)",
"pd.read_csv(self.output_path) else: None @property def last_update(self): if self._data_old is not None: return self._data_old.date.max()",
"to_csv(self): df = self.propose_df() df.to_csv(self.output_path, index=False) class Algeria(TwitterCollectorBase): def __init__(self, api, **kwargs): super().__init__(",
"self.tweets_relevant = [] self.output_path = \"./algeria-covid19-icu-data.csv\" self._data_old = self._get_current_data() def _set_output_path(self, paths, output_path):",
"in_icu, \"death\": new_deaths, \"text\": tweet.full_text, \"source_url\": self.build_post_url(tweet.id), }) df = pd.DataFrame(data) return df",
"pd.concat([df, df_current]).sort_values(by=\"date\") return df def stop_search(self, dt): if self._data_old is None: return False",
"Algeria(TwitterCollectorBase): def __init__(self, api, **kwargs): super().__init__( api=api, username=\"Sante_Gouv_dz\", location=\"Algeria\", **kwargs ) def _propose_df(self):",
"A large portion of the code came from the COVID-19 Dataset project by",
"re.search( r\"نوافيكم بالحصيلة الكاملة\", tweet.full_text) match2 = re.search(r\"العناية المركز\", tweet.full_text) if match and",
"True def to_csv(self): df = self.propose_df() df.to_csv(self.output_path, index=False) class Algeria(TwitterCollectorBase): def __init__(self, api,",
"os.path.isfile(self.output_path): return pd.read_csv(self.output_path) else: None @property def last_update(self): if self._data_old is not None:",
"tweepy.AppAuthHandler(consumer_key, consumer_secret) return tweepy.API(auth) def get_tweets(self, username, num_tweets=30): tweets = tweepy.Cursor(self._api.user_timeline, screen_name=username, include_rts=False,",
"new_cases, \"recoveries\": recoveries, \"in_icu\": in_icu, \"death\": new_deaths, \"text\": tweet.full_text, \"source_url\": self.build_post_url(tweet.id), }) df",
"__init__(self, consumer_key: str, consumer_secret: str): self._api = self._get_api(consumer_key, consumer_secret) def _get_api(self, consumer_key, consumer_secret):",
"in_icu = int(re.search(r'\\d+', in_icu_line).group(0)) new_deaths_line = re.findall( \"^.*وفيات.*$\", tweet.full_text, re.MULTILINE) if(new_deaths_line): new_deaths =",
"import tweepy try: from config import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET except ImportError: TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY')",
"tweet.full_text, re.MULTILINE)[0] in_icu = int(re.search(r'\\d+', in_icu_line).group(0)) new_deaths_line = re.findall( \"^.*وفيات.*$\", tweet.full_text, re.MULTILINE) if(new_deaths_line):",
"for tweet in self.tweets: match = re.search(r\"مؤشرات الترصد لوباء كوفيد-19\", tweet.full_text) or re.search(",
"class TwitterCollectorBase: def __init__(self, api, username: str, location: str, num_tweets=100): self.username = username",
"}[month] def main(): api = TwitterAPI(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET) Algeria(api).to_csv() if __name__ == \"__main__\": main()",
"not None: df_current = self._data_old[~self._data_old.date.isin(df.date)] df = pd.concat([df, df_current]).sort_values(by=\"date\") return df def stop_search(self,",
"the code came from the COVID-19 Dataset project by Our World in Data",
"return None def _propose_df(self): raise NotImplementedError def propose_df(self): df = ( self._propose_df() .pipe(self.merge_with_current_data)",
"df def arabicMonthToNum(month): return { 'جانفي': \"01\", 'فيفري': \"02\", 'مارس': \"03\", 'أفريل': \"04\",",
"None: return self._data_old.date.max() else: return None def _propose_df(self): raise NotImplementedError def propose_df(self): df",
"pd.DataFrame: if df.empty: return self._data_old if self._data_old is not None: df_current = self._data_old[~self._data_old.date.isin(df.date)]",
"new_deaths = int( re.search(r'\\d+', new_deaths_line[0]).group(0)) else: if(re.findall( \"^.*وفاة واحدة.*$\", tweet.full_text, re.MULTILINE)[0]): new_deaths =",
"BY license # https://creativecommons.org/licenses/by/4.0/ import os import pandas as pd import re import",
"NotImplementedError def propose_df(self): df = ( self._propose_df() .pipe(self.merge_with_current_data) .sort_values(\"date\") ) return df def",
"data = [] for tweet in self.tweets: match = re.search(r\"مؤشرات الترصد لوباء كوفيد-19\",",
"None: return False elif dt >= self.last_update: return False elif dt < self.last_update:",
"__init__(self, api, username: str, location: str, num_tweets=100): self.username = username self.location = location",
"username, num_tweets=30): tweets = tweepy.Cursor(self._api.user_timeline, screen_name=username, include_rts=False, tweet_mode='extended', exclude_replies=False, ).items(num_tweets) return tweets class",
"self._data_old if self._data_old is not None: df_current = self._data_old[~self._data_old.date.isin(df.date)] df = pd.concat([df, df_current]).sort_values(by=\"date\")",
"import re import tweepy try: from config import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET except ImportError: TWITTER_CONSUMER_KEY",
"return f\"https://twitter.com/{self.username}/status/{tweet_id}\" def merge_with_current_data(self, df: pd.DataFrame) -> pd.DataFrame: if df.empty: return self._data_old if",
"match = re.search(r\"مؤشرات الترصد لوباء كوفيد-19\", tweet.full_text) or re.search( r\"حصيلة وباء كورونا كوفيد-19",
"api=api, username=\"Sante_Gouv_dz\", location=\"Algeria\", **kwargs ) def _propose_df(self): data = [] for tweet in",
"raise AttributeError( \"Either specify attribute `paths` or method argument `output_path`\") def _get_current_data(self): if",
"\"death\": new_deaths, \"text\": tweet.full_text, \"source_url\": self.build_post_url(tweet.id), }) df = pd.DataFrame(data) return df def",
"tweet.full_text) if match and match2: dt_match = re.search( r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\", tweet.full_text) dt = dt_match.group(",
"re.findall( \"^.*جديدة.*$\", tweet.full_text, re.MULTILINE)[0] new_cases = int(re.search(r'\\d+', new_cases_line).group(0)) recoveries_line = re.findall( \"^.*للشفاء.*$\", tweet.full_text,",
"recoveries_line).group(0)) in_icu_line = re.findall( \"^.*العناية المركز.*$\", tweet.full_text, re.MULTILINE)[0] in_icu = int(re.search(r'\\d+', in_icu_line).group(0)) new_deaths_line",
"= self._get_api(consumer_key, consumer_secret) def _get_api(self, consumer_key, consumer_secret): auth = tweepy.AppAuthHandler(consumer_key, consumer_secret) return tweepy.API(auth)",
"if paths is not None: return paths.tmp_vax_out_proposal(self.location) else: raise AttributeError( \"Either specify attribute",
"return False elif dt < self.last_update: return True def to_csv(self): df = self.propose_df()",
"'نوفمبر': \"11\", 'ديسمبر': \"12\" }[month] def main(): api = TwitterAPI(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET) Algeria(api).to_csv() if",
"الترصد لوباء كوفيد-19\", tweet.full_text) or re.search( r\"حصيلة وباء كورونا كوفيد-19 ليوم\", tweet.full_text) or",
"is not None: df_current = self._data_old[~self._data_old.date.isin(df.date)] df = pd.concat([df, df_current]).sort_values(by=\"date\") return df def",
"try: from config import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET except ImportError: TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY') TWITTER_CONSUMER_SECRET =",
"return False elif dt >= self.last_update: return False elif dt < self.last_update: return",
"pd.DataFrame(data) return df def arabicMonthToNum(month): return { 'جانفي': \"01\", 'فيفري': \"02\", 'مارس': \"03\",",
"df def stop_search(self, dt): if self._data_old is None: return False elif dt >=",
"\"^.*وفيات.*$\", tweet.full_text, re.MULTILINE) if(new_deaths_line): new_deaths = int( re.search(r'\\d+', new_deaths_line[0]).group(0)) else: if(re.findall( \"^.*وفاة واحدة.*$\",",
"return paths.tmp_vax_out_proposal(self.location) else: raise AttributeError( \"Either specify attribute `paths` or method argument `output_path`\")",
"username self.location = location self.tweets = api.get_tweets(self.username, num_tweets) self.tweets_relevant = [] self.output_path =",
"class TwitterAPI: def __init__(self, consumer_key: str, consumer_secret: str): self._api = self._get_api(consumer_key, consumer_secret) def",
").items(num_tweets) return tweets class TwitterCollectorBase: def __init__(self, api, username: str, location: str, num_tweets=100):",
"num_tweets) self.tweets_relevant = [] self.output_path = \"./algeria-covid19-icu-data.csv\" self._data_old = self._get_current_data() def _set_output_path(self, paths,",
"dt, \"new_cases\": new_cases, \"recoveries\": recoveries, \"in_icu\": in_icu, \"death\": new_deaths, \"text\": tweet.full_text, \"source_url\": self.build_post_url(tweet.id),",
"method argument `output_path`\") def _get_current_data(self): if os.path.isfile(self.output_path): return pd.read_csv(self.output_path) else: None @property def",
"is not None: return self._data_old.date.max() else: return None def _propose_df(self): raise NotImplementedError def",
"def arabicMonthToNum(month): return { 'جانفي': \"01\", 'فيفري': \"02\", 'مارس': \"03\", 'أفريل': \"04\", 'ماي':",
"'فيفري': \"02\", 'مارس': \"03\", 'أفريل': \"04\", 'ماي': \"05\", 'جوان': \"06\", 'جويلية': \"07\", 'اوت':",
"propose_df(self): df = ( self._propose_df() .pipe(self.merge_with_current_data) .sort_values(\"date\") ) return df def build_post_url(self, tweet_id:",
"location=\"Algeria\", **kwargs ) def _propose_df(self): data = [] for tweet in self.tweets: match",
"\"05\", 'جوان': \"06\", 'جويلية': \"07\", 'اوت': \"08\", 'أوت': \"08\", 'سبتمبر': \"09\", 'أكتوبر': \"10\",",
"\"^.*وفاة واحدة.*$\", tweet.full_text, re.MULTILINE)[0]): new_deaths = 1 data.append({ \"date\": dt, \"new_cases\": new_cases, \"recoveries\":",
"def _propose_df(self): raise NotImplementedError def propose_df(self): df = ( self._propose_df() .pipe(self.merge_with_current_data) .sort_values(\"date\") )",
"https://github.com/lucasrodes # The code is under completely open access under the Creative Commons",
"in_icu_line = re.findall( \"^.*العناية المركز.*$\", tweet.full_text, re.MULTILINE)[0] in_icu = int(re.search(r'\\d+', in_icu_line).group(0)) new_deaths_line =",
"str, consumer_secret: str): self._api = self._get_api(consumer_key, consumer_secret) def _get_api(self, consumer_key, consumer_secret): auth =",
"location: str, num_tweets=100): self.username = username self.location = location self.tweets = api.get_tweets(self.username, num_tweets)",
"self.stop_search(dt): break new_cases_line = re.findall( \"^.*جديدة.*$\", tweet.full_text, re.MULTILINE)[0] new_cases = int(re.search(r'\\d+', new_cases_line).group(0)) recoveries_line",
"= os.getenv('TWITTER_CONSUMER_SECRET') class TwitterAPI: def __init__(self, consumer_key: str, consumer_secret: str): self._api = self._get_api(consumer_key,",
"False elif dt >= self.last_update: return False elif dt < self.last_update: return True",
"from the COVID-19 Dataset project by Our World in Data # https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter #",
"<NAME> https://github.com/lucasrodes # The code is under completely open access under the Creative",
"self.username = username self.location = location self.tweets = api.get_tweets(self.username, num_tweets) self.tweets_relevant = []",
"of the code came from the COVID-19 Dataset project by Our World in",
"= re.findall( \"^.*العناية المركز.*$\", tweet.full_text, re.MULTILINE)[0] in_icu = int(re.search(r'\\d+', in_icu_line).group(0)) new_deaths_line = re.findall(",
"except ImportError: TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY') TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET') class TwitterAPI: def __init__(self, consumer_key:",
"= 1 data.append({ \"date\": dt, \"new_cases\": new_cases, \"recoveries\": recoveries, \"in_icu\": in_icu, \"death\": new_deaths,",
"\"in_icu\": in_icu, \"death\": new_deaths, \"text\": tweet.full_text, \"source_url\": self.build_post_url(tweet.id), }) df = pd.DataFrame(data) return",
"recoveries = int(re.search(r'\\d+', recoveries_line).group(0)) in_icu_line = re.findall( \"^.*العناية المركز.*$\", tweet.full_text, re.MULTILINE)[0] in_icu =",
"import pandas as pd import re import tweepy try: from config import TWITTER_CONSUMER_KEY,",
"str, num_tweets=100): self.username = username self.location = location self.tweets = api.get_tweets(self.username, num_tweets) self.tweets_relevant",
"access under the Creative Commons BY license # https://creativecommons.org/licenses/by/4.0/ import os import pandas",
"df: pd.DataFrame) -> pd.DataFrame: if df.empty: return self._data_old if self._data_old is not None:",
"= re.search(r\"العناية المركز\", tweet.full_text) if match and match2: dt_match = re.search( r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\", tweet.full_text)",
"\"date\": dt, \"new_cases\": new_cases, \"recoveries\": recoveries, \"in_icu\": in_icu, \"death\": new_deaths, \"text\": tweet.full_text, \"source_url\":",
"# https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter # Mainly contributed by <NAME> https://github.com/lucasrodes # The code is under",
"str, location: str, num_tweets=100): self.username = username self.location = location self.tweets = api.get_tweets(self.username,",
"return True def to_csv(self): df = self.propose_df() df.to_csv(self.output_path, index=False) class Algeria(TwitterCollectorBase): def __init__(self,",
"if match and match2: dt_match = re.search( r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\", tweet.full_text) dt = dt_match.group( 3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2)",
"المركز.*$\", tweet.full_text, re.MULTILINE)[0] in_icu = int(re.search(r'\\d+', in_icu_line).group(0)) new_deaths_line = re.findall( \"^.*وفيات.*$\", tweet.full_text, re.MULTILINE)",
"Creative Commons BY license # https://creativecommons.org/licenses/by/4.0/ import os import pandas as pd import",
"int(re.search(r'\\d+', in_icu_line).group(0)) new_deaths_line = re.findall( \"^.*وفيات.*$\", tweet.full_text, re.MULTILINE) if(new_deaths_line): new_deaths = int( re.search(r'\\d+',",
"api.get_tweets(self.username, num_tweets) self.tweets_relevant = [] self.output_path = \"./algeria-covid19-icu-data.csv\" self._data_old = self._get_current_data() def _set_output_path(self,",
"None: df_current = self._data_old[~self._data_old.date.isin(df.date)] df = pd.concat([df, df_current]).sort_values(by=\"date\") return df def stop_search(self, dt):",
"paths.tmp_vax_out_proposal(self.location) else: raise AttributeError( \"Either specify attribute `paths` or method argument `output_path`\") def",
"def last_update(self): if self._data_old is not None: return self._data_old.date.max() else: return None def",
"match2 = re.search(r\"العناية المركز\", tweet.full_text) if match and match2: dt_match = re.search( r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\",",
"\"09\", 'أكتوبر': \"10\", 'اكتوبر': \"10\", 'كتوبر': \"10\", 'نوفمبر': \"11\", 'ديسمبر': \"12\" }[month] def",
"-> pd.DataFrame: if df.empty: return self._data_old if self._data_old is not None: df_current =",
"df.to_csv(self.output_path, index=False) class Algeria(TwitterCollectorBase): def __init__(self, api, **kwargs): super().__init__( api=api, username=\"Sante_Gouv_dz\", location=\"Algeria\", **kwargs",
"contributed by <NAME> https://github.com/lucasrodes # The code is under completely open access under",
"index=False) class Algeria(TwitterCollectorBase): def __init__(self, api, **kwargs): super().__init__( api=api, username=\"Sante_Gouv_dz\", location=\"Algeria\", **kwargs )",
"attribute `paths` or method argument `output_path`\") def _get_current_data(self): if os.path.isfile(self.output_path): return pd.read_csv(self.output_path) else:",
"code is under completely open access under the Creative Commons BY license #",
"'سبتمبر': \"09\", 'أكتوبر': \"10\", 'اكتوبر': \"10\", 'كتوبر': \"10\", 'نوفمبر': \"11\", 'ديسمبر': \"12\" }[month]",
"Commons BY license # https://creativecommons.org/licenses/by/4.0/ import os import pandas as pd import re",
"match and match2: dt_match = re.search( r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\", tweet.full_text) dt = dt_match.group( 3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2) if",
"def _get_current_data(self): if os.path.isfile(self.output_path): return pd.read_csv(self.output_path) else: None @property def last_update(self): if self._data_old",
"username: str, location: str, num_tweets=100): self.username = username self.location = location self.tweets =",
"= [] for tweet in self.tweets: match = re.search(r\"مؤشرات الترصد لوباء كوفيد-19\", tweet.full_text)",
"f\"https://twitter.com/{self.username}/status/{tweet_id}\" def merge_with_current_data(self, df: pd.DataFrame) -> pd.DataFrame: if df.empty: return self._data_old if self._data_old",
"return df def arabicMonthToNum(month): return { 'جانفي': \"01\", 'فيفري': \"02\", 'مارس': \"03\", 'أفريل':",
"api, **kwargs): super().__init__( api=api, username=\"Sante_Gouv_dz\", location=\"Algeria\", **kwargs ) def _propose_df(self): data = []",
"= int(re.search(r'\\d+', in_icu_line).group(0)) new_deaths_line = re.findall( \"^.*وفيات.*$\", tweet.full_text, re.MULTILINE) if(new_deaths_line): new_deaths = int(",
"if os.path.isfile(self.output_path): return pd.read_csv(self.output_path) else: None @property def last_update(self): if self._data_old is not",
"re.MULTILINE) if(new_deaths_line): new_deaths = int( re.search(r'\\d+', new_deaths_line[0]).group(0)) else: if(re.findall( \"^.*وفاة واحدة.*$\", tweet.full_text, re.MULTILINE)[0]):",
"tweet.full_text, re.MULTILINE)[0]): new_deaths = 1 data.append({ \"date\": dt, \"new_cases\": new_cases, \"recoveries\": recoveries, \"in_icu\":",
"if self._data_old is None: return False elif dt >= self.last_update: return False elif",
"def __init__(self, api, **kwargs): super().__init__( api=api, username=\"Sante_Gouv_dz\", location=\"Algeria\", **kwargs ) def _propose_df(self): data",
"username=\"Sante_Gouv_dz\", location=\"Algeria\", **kwargs ) def _propose_df(self): data = [] for tweet in self.tweets:",
">= self.last_update: return False elif dt < self.last_update: return True def to_csv(self): df",
"TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET') class TwitterAPI: def __init__(self, consumer_key: str, consumer_secret: str): self._api =",
"The code is under completely open access under the Creative Commons BY license",
"\"02\", 'مارس': \"03\", 'أفريل': \"04\", 'ماي': \"05\", 'جوان': \"06\", 'جويلية': \"07\", 'اوت': \"08\",",
"= self._data_old[~self._data_old.date.isin(df.date)] df = pd.concat([df, df_current]).sort_values(by=\"date\") return df def stop_search(self, dt): if self._data_old",
"consumer_secret) return tweepy.API(auth) def get_tweets(self, username, num_tweets=30): tweets = tweepy.Cursor(self._api.user_timeline, screen_name=username, include_rts=False, tweet_mode='extended',",
"= self._get_current_data() def _set_output_path(self, paths, output_path): if output_path is None: if paths is",
"config import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET except ImportError: TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY') TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET') class",
"\"new_cases\": new_cases, \"recoveries\": recoveries, \"in_icu\": in_icu, \"death\": new_deaths, \"text\": tweet.full_text, \"source_url\": self.build_post_url(tweet.id), })",
"re.MULTILINE)[0] recoveries = int(re.search(r'\\d+', recoveries_line).group(0)) in_icu_line = re.findall( \"^.*العناية المركز.*$\", tweet.full_text, re.MULTILINE)[0] in_icu",
"last_update(self): if self._data_old is not None: return self._data_old.date.max() else: return None def _propose_df(self):",
"= api.get_tweets(self.username, num_tweets) self.tweets_relevant = [] self.output_path = \"./algeria-covid19-icu-data.csv\" self._data_old = self._get_current_data() def",
"'ماي': \"05\", 'جوان': \"06\", 'جويلية': \"07\", 'اوت': \"08\", 'أوت': \"08\", 'سبتمبر': \"09\", 'أكتوبر':",
"else: raise AttributeError( \"Either specify attribute `paths` or method argument `output_path`\") def _get_current_data(self):",
"new_deaths_line = re.findall( \"^.*وفيات.*$\", tweet.full_text, re.MULTILINE) if(new_deaths_line): new_deaths = int( re.search(r'\\d+', new_deaths_line[0]).group(0)) else:",
"self._data_old is not None: df_current = self._data_old[~self._data_old.date.isin(df.date)] df = pd.concat([df, df_current]).sort_values(by=\"date\") return df",
"new_cases_line).group(0)) recoveries_line = re.findall( \"^.*للشفاء.*$\", tweet.full_text, re.MULTILINE)[0] recoveries = int(re.search(r'\\d+', recoveries_line).group(0)) in_icu_line =",
"os.getenv('TWITTER_CONSUMER_KEY') TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET') class TwitterAPI: def __init__(self, consumer_key: str, consumer_secret: str): self._api",
"= location self.tweets = api.get_tweets(self.username, num_tweets) self.tweets_relevant = [] self.output_path = \"./algeria-covid19-icu-data.csv\" self._data_old",
"def _propose_df(self): data = [] for tweet in self.tweets: match = re.search(r\"مؤشرات الترصد",
"the Creative Commons BY license # https://creativecommons.org/licenses/by/4.0/ import os import pandas as pd",
"the COVID-19 Dataset project by Our World in Data # https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter # Mainly",
"def _get_api(self, consumer_key, consumer_secret): auth = tweepy.AppAuthHandler(consumer_key, consumer_secret) return tweepy.API(auth) def get_tweets(self, username,",
"Dataset project by Our World in Data # https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter # Mainly contributed by",
"def build_post_url(self, tweet_id: str): return f\"https://twitter.com/{self.username}/status/{tweet_id}\" def merge_with_current_data(self, df: pd.DataFrame) -> pd.DataFrame: if",
"import os import pandas as pd import re import tweepy try: from config",
"'أكتوبر': \"10\", 'اكتوبر': \"10\", 'كتوبر': \"10\", 'نوفمبر': \"11\", 'ديسمبر': \"12\" }[month] def main():",
"argument `output_path`\") def _get_current_data(self): if os.path.isfile(self.output_path): return pd.read_csv(self.output_path) else: None @property def last_update(self):",
"self.tweets: match = re.search(r\"مؤشرات الترصد لوباء كوفيد-19\", tweet.full_text) or re.search( r\"حصيلة وباء كورونا",
"r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\", tweet.full_text) dt = dt_match.group( 3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2) if self.stop_search(dt): break new_cases_line = re.findall( \"^.*جديدة.*$\",",
"**kwargs): super().__init__( api=api, username=\"Sante_Gouv_dz\", location=\"Algeria\", **kwargs ) def _propose_df(self): data = [] for",
"AttributeError( \"Either specify attribute `paths` or method argument `output_path`\") def _get_current_data(self): if os.path.isfile(self.output_path):",
"def merge_with_current_data(self, df: pd.DataFrame) -> pd.DataFrame: if df.empty: return self._data_old if self._data_old is",
") def _propose_df(self): data = [] for tweet in self.tweets: match = re.search(r\"مؤشرات",
"self._get_api(consumer_key, consumer_secret) def _get_api(self, consumer_key, consumer_secret): auth = tweepy.AppAuthHandler(consumer_key, consumer_secret) return tweepy.API(auth) def",
"df def build_post_url(self, tweet_id: str): return f\"https://twitter.com/{self.username}/status/{tweet_id}\" def merge_with_current_data(self, df: pd.DataFrame) -> pd.DataFrame:",
"tweet.full_text, re.MULTILINE)[0] recoveries = int(re.search(r'\\d+', recoveries_line).group(0)) in_icu_line = re.findall( \"^.*العناية المركز.*$\", tweet.full_text, re.MULTILINE)[0]",
"consumer_key: str, consumer_secret: str): self._api = self._get_api(consumer_key, consumer_secret) def _get_api(self, consumer_key, consumer_secret): auth",
"self.propose_df() df.to_csv(self.output_path, index=False) class Algeria(TwitterCollectorBase): def __init__(self, api, **kwargs): super().__init__( api=api, username=\"Sante_Gouv_dz\", location=\"Algeria\",",
"dt): if self._data_old is None: return False elif dt >= self.last_update: return False",
"'مارس': \"03\", 'أفريل': \"04\", 'ماي': \"05\", 'جوان': \"06\", 'جويلية': \"07\", 'اوت': \"08\", 'أوت':",
"df.empty: return self._data_old if self._data_old is not None: df_current = self._data_old[~self._data_old.date.isin(df.date)] df =",
"str): self._api = self._get_api(consumer_key, consumer_secret) def _get_api(self, consumer_key, consumer_secret): auth = tweepy.AppAuthHandler(consumer_key, consumer_secret)",
"as pd import re import tweepy try: from config import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET except",
"= username self.location = location self.tweets = api.get_tweets(self.username, num_tweets) self.tweets_relevant = [] self.output_path",
"https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter # Mainly contributed by <NAME> https://github.com/lucasrodes # The code is under completely",
"\"Either specify attribute `paths` or method argument `output_path`\") def _get_current_data(self): if os.path.isfile(self.output_path): return",
"else: return None def _propose_df(self): raise NotImplementedError def propose_df(self): df = ( self._propose_df()",
"re.search( r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\", tweet.full_text) dt = dt_match.group( 3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2) if self.stop_search(dt): break new_cases_line = re.findall(",
"'أفريل': \"04\", 'ماي': \"05\", 'جوان': \"06\", 'جويلية': \"07\", 'اوت': \"08\", 'أوت': \"08\", 'سبتمبر':",
"'أوت': \"08\", 'سبتمبر': \"09\", 'أكتوبر': \"10\", 'اكتوبر': \"10\", 'كتوبر': \"10\", 'نوفمبر': \"11\", 'ديسمبر':",
"None def _propose_df(self): raise NotImplementedError def propose_df(self): df = ( self._propose_df() .pipe(self.merge_with_current_data) .sort_values(\"date\")",
"is not None: return paths.tmp_vax_out_proposal(self.location) else: raise AttributeError( \"Either specify attribute `paths` or",
"if self._data_old is not None: return self._data_old.date.max() else: return None def _propose_df(self): raise",
"re.MULTILINE)[0] new_cases = int(re.search(r'\\d+', new_cases_line).group(0)) recoveries_line = re.findall( \"^.*للشفاء.*$\", tweet.full_text, re.MULTILINE)[0] recoveries =",
"consumer_secret): auth = tweepy.AppAuthHandler(consumer_key, consumer_secret) return tweepy.API(auth) def get_tweets(self, username, num_tweets=30): tweets =",
"3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2) if self.stop_search(dt): break new_cases_line = re.findall( \"^.*جديدة.*$\", tweet.full_text, re.MULTILINE)[0] new_cases = int(re.search(r'\\d+',",
"= tweepy.AppAuthHandler(consumer_key, consumer_secret) return tweepy.API(auth) def get_tweets(self, username, num_tweets=30): tweets = tweepy.Cursor(self._api.user_timeline, screen_name=username,",
"new_deaths_line[0]).group(0)) else: if(re.findall( \"^.*وفاة واحدة.*$\", tweet.full_text, re.MULTILINE)[0]): new_deaths = 1 data.append({ \"date\": dt,",
"\"text\": tweet.full_text, \"source_url\": self.build_post_url(tweet.id), }) df = pd.DataFrame(data) return df def arabicMonthToNum(month): return",
"وباء كورونا كوفيد-19 ليوم\", tweet.full_text) or re.search( r\"نوافيكم بالحصيلة الكاملة\", tweet.full_text) match2 =",
"screen_name=username, include_rts=False, tweet_mode='extended', exclude_replies=False, ).items(num_tweets) return tweets class TwitterCollectorBase: def __init__(self, api, username:",
"TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET except ImportError: TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY') TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET') class TwitterAPI: def",
"new_deaths, \"text\": tweet.full_text, \"source_url\": self.build_post_url(tweet.id), }) df = pd.DataFrame(data) return df def arabicMonthToNum(month):",
"project by Our World in Data # https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter # Mainly contributed by <NAME>",
"output_path is None: if paths is not None: return paths.tmp_vax_out_proposal(self.location) else: raise AttributeError(",
"def to_csv(self): df = self.propose_df() df.to_csv(self.output_path, index=False) class Algeria(TwitterCollectorBase): def __init__(self, api, **kwargs):",
"def stop_search(self, dt): if self._data_old is None: return False elif dt >= self.last_update:",
"_propose_df(self): raise NotImplementedError def propose_df(self): df = ( self._propose_df() .pipe(self.merge_with_current_data) .sort_values(\"date\") ) return",
"`output_path`\") def _get_current_data(self): if os.path.isfile(self.output_path): return pd.read_csv(self.output_path) else: None @property def last_update(self): if",
"ImportError: TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY') TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET') class TwitterAPI: def __init__(self, consumer_key: str,",
"= dt_match.group( 3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2) if self.stop_search(dt): break new_cases_line = re.findall( \"^.*جديدة.*$\", tweet.full_text, re.MULTILINE)[0] new_cases",
"TWITTER_CONSUMER_SECRET except ImportError: TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY') TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET') class TwitterAPI: def __init__(self,",
"elif dt >= self.last_update: return False elif dt < self.last_update: return True def",
"data.append({ \"date\": dt, \"new_cases\": new_cases, \"recoveries\": recoveries, \"in_icu\": in_icu, \"death\": new_deaths, \"text\": tweet.full_text,",
"if self._data_old is not None: df_current = self._data_old[~self._data_old.date.isin(df.date)] df = pd.concat([df, df_current]).sort_values(by=\"date\") return",
"str): return f\"https://twitter.com/{self.username}/status/{tweet_id}\" def merge_with_current_data(self, df: pd.DataFrame) -> pd.DataFrame: if df.empty: return self._data_old",
"dt < self.last_update: return True def to_csv(self): df = self.propose_df() df.to_csv(self.output_path, index=False) class",
"in_icu_line).group(0)) new_deaths_line = re.findall( \"^.*وفيات.*$\", tweet.full_text, re.MULTILINE) if(new_deaths_line): new_deaths = int( re.search(r'\\d+', new_deaths_line[0]).group(0))",
"return self._data_old.date.max() else: return None def _propose_df(self): raise NotImplementedError def propose_df(self): df =",
"كوفيد-19\", tweet.full_text) or re.search( r\"حصيلة وباء كورونا كوفيد-19 ليوم\", tweet.full_text) or re.search( r\"نوافيكم",
"False elif dt < self.last_update: return True def to_csv(self): df = self.propose_df() df.to_csv(self.output_path,",
"in self.tweets: match = re.search(r\"مؤشرات الترصد لوباء كوفيد-19\", tweet.full_text) or re.search( r\"حصيلة وباء",
"'ديسمبر': \"12\" }[month] def main(): api = TwitterAPI(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET) Algeria(api).to_csv() if __name__ ==",
"re.findall( \"^.*العناية المركز.*$\", tweet.full_text, re.MULTILINE)[0] in_icu = int(re.search(r'\\d+', in_icu_line).group(0)) new_deaths_line = re.findall( \"^.*وفيات.*$\",",
"= pd.DataFrame(data) return df def arabicMonthToNum(month): return { 'جانفي': \"01\", 'فيفري': \"02\", 'مارس':",
"tweet.full_text) or re.search( r\"نوافيكم بالحصيلة الكاملة\", tweet.full_text) match2 = re.search(r\"العناية المركز\", tweet.full_text) if",
"pandas as pd import re import tweepy try: from config import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET",
"\"04\", 'ماي': \"05\", 'جوان': \"06\", 'جويلية': \"07\", 'اوت': \"08\", 'أوت': \"08\", 'سبتمبر': \"09\",",
"COVID-19 Dataset project by Our World in Data # https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter # Mainly contributed",
"recoveries_line = re.findall( \"^.*للشفاء.*$\", tweet.full_text, re.MULTILINE)[0] recoveries = int(re.search(r'\\d+', recoveries_line).group(0)) in_icu_line = re.findall(",
"tweet.full_text) dt = dt_match.group( 3)+\"-\"+arabicMonthToNum(dt_match.group(2))+\"-\"+dt_match.group(1).zfill(2) if self.stop_search(dt): break new_cases_line = re.findall( \"^.*جديدة.*$\", tweet.full_text,",
"tweets = tweepy.Cursor(self._api.user_timeline, screen_name=username, include_rts=False, tweet_mode='extended', exclude_replies=False, ).items(num_tweets) return tweets class TwitterCollectorBase: def",
"build_post_url(self, tweet_id: str): return f\"https://twitter.com/{self.username}/status/{tweet_id}\" def merge_with_current_data(self, df: pd.DataFrame) -> pd.DataFrame: if df.empty:",
"break new_cases_line = re.findall( \"^.*جديدة.*$\", tweet.full_text, re.MULTILINE)[0] new_cases = int(re.search(r'\\d+', new_cases_line).group(0)) recoveries_line =",
"= re.findall( \"^.*جديدة.*$\", tweet.full_text, re.MULTILINE)[0] new_cases = int(re.search(r'\\d+', new_cases_line).group(0)) recoveries_line = re.findall( \"^.*للشفاء.*$\",",
"df = self.propose_df() df.to_csv(self.output_path, index=False) class Algeria(TwitterCollectorBase): def __init__(self, api, **kwargs): super().__init__( api=api,",
"def __init__(self, api, username: str, location: str, num_tweets=100): self.username = username self.location =",
"return df def build_post_url(self, tweet_id: str): return f\"https://twitter.com/{self.username}/status/{tweet_id}\" def merge_with_current_data(self, df: pd.DataFrame) ->",
"get_tweets(self, username, num_tweets=30): tweets = tweepy.Cursor(self._api.user_timeline, screen_name=username, include_rts=False, tweet_mode='extended', exclude_replies=False, ).items(num_tweets) return tweets",
"class Algeria(TwitterCollectorBase): def __init__(self, api, **kwargs): super().__init__( api=api, username=\"Sante_Gouv_dz\", location=\"Algeria\", **kwargs ) def",
"self.last_update: return False elif dt < self.last_update: return True def to_csv(self): df =",
"None: return paths.tmp_vax_out_proposal(self.location) else: raise AttributeError( \"Either specify attribute `paths` or method argument",
"re.search(r\"العناية المركز\", tweet.full_text) if match and match2: dt_match = re.search( r\"(\\d{1,2})\\s*([ء-ي]+)\\s*[ء-ي]*(202\\d)\", tweet.full_text) dt",
"re.MULTILINE)[0] in_icu = int(re.search(r'\\d+', in_icu_line).group(0)) new_deaths_line = re.findall( \"^.*وفيات.*$\", tweet.full_text, re.MULTILINE) if(new_deaths_line): new_deaths",
"by Our World in Data # https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter # Mainly contributed by <NAME> https://github.com/lucasrodes",
"{ 'جانفي': \"01\", 'فيفري': \"02\", 'مارس': \"03\", 'أفريل': \"04\", 'ماي': \"05\", 'جوان': \"06\",",
"tweet.full_text, re.MULTILINE) if(new_deaths_line): new_deaths = int( re.search(r'\\d+', new_deaths_line[0]).group(0)) else: if(re.findall( \"^.*وفاة واحدة.*$\", tweet.full_text,",
"specify attribute `paths` or method argument `output_path`\") def _get_current_data(self): if os.path.isfile(self.output_path): return pd.read_csv(self.output_path)"
] |
[
"abstract class that has meta information used by the subject and haplotype classes.",
"to find novel sub-alleles) # Limit alleles that can be chosen based on",
"== self.haplotypes[i]) & (self.translation_table[\"MATCH\"] > 0) ].shape[0] * haplotypes[i] for i in range(num_haps))",
"Unless required by applicable law or agreed to in writing, software # distributed",
"to standardized form Args: alt (str): alt allele ref (str): ref allele Returns:",
"variant alleles used hap_prob += lpSum( self.translation_table[ (self.translation_table.iloc[:,0] == self.haplotypes[i]) & (self.translation_table[\"MATCH\"] >",
"re-attempted with phasing off.\") return None, None else: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix}",
"hap_prob.solve(PULP_CBC_CMD(msg=0)) def lp_hap(self) -> tuple: \"\"\" Build and run the LP problem Returns:",
"form Args: var_type (str): insertion, deletion, or substitution alt (str): allele from translation",
"<= int(self.config.LP_PARAMS[\"max_haps\"])) # Cannot choose more than x haplotypes (may be increased to",
"self.genotypes[ID] except KeyError: # Not in VCF return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand try: vcf_geno =",
"to assemble the constraint for phased data Looks at all strands that are",
"= self.translation_table.loc[:,[\"VAR_ID\", \"MATCH\", \"STRAND\", \"Type\", \"Variant Start\"]].drop_duplicates() # List of matched variants self.haplotypes",
"# List of matched variants self.haplotypes = [hap for hap in self.translation_table.iloc[:,0].unique().tolist()] #",
"(str): ref allele Returns: str: reformatted alt allele \"\"\" if alt is None:",
"len(called) == 1: called.append(self.reference) return called, variants, len(haps), is_ref def _solve(self, hap_prob: object)",
"haplotypes[k] for k in range(num_haps))) == self.variants.iloc[i,1]) if self.phased: for i in range(num_haps):",
"else np.array(default)) def optimize_hap(self) -> (): \"\"\" Solve for the most likely diplotype",
"with phasing off.\") return None, None else: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will",
"= row[\"ID\"] try: genotype = self.genotypes[ID] except KeyError: # Not in VCF return",
"alt_matches, strand def _haps_from_prob(self, lp_problem: object) -> tuple: \"\"\" Take a optimally solved",
"to a standardized form Args: var_type (str): insertion, deletion, or substitution alt (str):",
"for m in matches] self.translation_table[\"VAR_ID\"] = self.translation_table.apply( lambda x: f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}', axis = 1",
"<= 1 # max one strand hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for",
"None: return \"-\" if \"<\" in alt: return f\"s{alt.strip('<>')}\" elif len(ref) > len(alt):",
"else: ID = row[\"ID\"] try: genotype = self.genotypes[ID] except KeyError: # Not in",
"= [LpVariable(var, cat = \"Binary\") for var in self.variants[\"VAR_ID\"]] # Set constraint of",
"return [f'id-'] else: try: return [f's{a}' for a in self.config.IUPAC_CODES[alt]] except KeyError: return",
"reformatted alt allele \"\"\" if alt is None: return \"-\" if \"<\" in",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"per variant hap_prob += (variants[i] <= (lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))))",
"+= ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) <= self.variants.iloc[i,1] * variants[i]) #",
"the specific language governing permissions and # limitations under the License. import sys",
"to two alleles per variant hap_prob += (variants[i] <= (lpSum(hap_vars[k][i] * haplotypes[k] for",
"CYP2D6*5 will be missed by the other methods if self.variants.iloc[i,3] == \"CNV\": hap_prob",
"AbstractGene from . import LOGGING class NoVariantsException(Exception): \"\"\" Exception to call if a",
"for i in range(num_haps)) <= 1 hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for",
"for i in range(num_haps)) <= int(self.config.LP_PARAMS[\"max_haps\"])) # Cannot choose more than x haplotypes",
"in range(num_haps)) self._solve(hap_prob) if hap_prob.status != 1: if self.phased: LOGGING.warning(f\"No feasible solution found,",
"\"id-\" elif len(ref) < len(alt): return f'id{alt[1:]}' # Remove first position else: return",
"called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if is_ref: possible_haplotypes.append(tuple(called)) haplotype_variants.append(tuple(variants)) return possible_haplotypes, haplotype_variants",
"called, variants, len(haps), is_ref def _solve(self, hap_prob: object) -> object: if self.solver ==",
"else: haps.append((v.name, v.varValue)) if len(haps) == 0: called = [self.reference, self.reference] is_ref =",
"elif len(haps) == 2: called = [haps[0][0], haps[1][0]] else: called = np.array([np.repeat(i[0], i[1])",
"to run the table_matcher function with genotyped before you can optimize\") sys.exit(1) called,",
"in tt_alt_geno]) == 1 else -1 elif alt_matches == 2 and genotype[\"phased\"]: strand",
"len(alt): return \"id-\" elif len(ref) < len(alt): return f'id{alt[1:]}' # Remove first position",
"is None: return \"-\" if \"<\" in alt: return f\"s{alt.strip('<>')}\" elif len(ref) >",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"if alt_matches == 1 and genotype[\"phased\"]: strand = 1 if max([vcf_geno.index(a) for a",
"in range(num_haps))) <= self.variants.iloc[i,1] * variants[i]) # Any CNV variants defined, if matched",
"lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) >= -1 # Set to",
"LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will be re-attempted with phasing off.\") return None,",
"when a phased call attempt fails self.phased = False called, variants = self.lp_hap()",
"Copyright 2021 <NAME> # Licensed under the Apache License, Version 2.0 (the \"License\");",
"= 1 if max([vcf_geno.index(a) for a in tt_alt_geno]) == 1 else -1 elif",
"sc = self.translation_table[self.translation_table.iloc[:,0] == self.haplotypes[i]][\"STRAND\"].unique() sc = np.delete(sc, np.where(sc == [3])) return(sc if",
"on zygosity for i in range(num_vars): # Iterate over every variant # A",
"return possible_haplotypes, haplotype_variants max_opt = hap_prob.objective.value() opt = max_opt while opt >= (max_opt",
"-> tuple: \"\"\" Build and run the LP problem Returns: tuple: list of",
"[\"-\", \"-\"]: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand tt_alt_geno = self._mod_tt_record(row.iloc[8], row.iloc[7]) alt_matches = sum([vcf_geno.count(a) for",
"{self.sample_prefix} will not be called\") return [], [] else: called, variants, hap_len, is_ref",
"record from VCF to standardized form Args: alt (str): alt allele ref (str):",
"(lpSum(haplotypes[i] for i in range(num_haps)) <= int(self.config.LP_PARAMS[\"max_haps\"])) # Cannot choose more than x",
"== k for k in no_match]) > 0: drops.append(i) self.translation_table = self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] #",
"gene.solver self.matched = False self.sample_prefix = sample_prefix self.genotypes = gene.get_sample_vars(sample_prefix) if len(self.genotypes) ==",
"for phased data Looks at all strands that are part of a haplotype",
"\"\"\" Take a optimally solved lp problem Produce called haplotypes Args: lp_problem (object):",
"[haps[0][0], haps[1][0]] else: called = np.array([np.repeat(i[0], i[1]) for i in haps]).flatten().tolist() if len(called)",
"self.sample_prefix = sample_prefix self.genotypes = gene.get_sample_vars(sample_prefix) if len(self.genotypes) == 0: raise NoVariantsException self.translation_table",
"= [] for hap in self.haplotypes: trans = self.translation_table[self.translation_table.iloc[:,0] == hap] hap_vars.append([1 if",
"and associated information \"\"\" is_ref = False haps = [] variants = []",
"variant # A variant allele can only be used once per haplotype, up",
"used hap_prob += lpSum( self.translation_table[ (self.translation_table.iloc[:,0] == self.haplotypes[i]) & (self.translation_table[\"MATCH\"] > 0) ].shape[0]",
"called\") return [], [] else: called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if is_ref:",
"return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand tt_alt_geno = self._mod_tt_record(row.iloc[8], row.iloc[7]) alt_matches = sum([vcf_geno.count(a) for a in",
"= self.translation_table[self.translation_table.iloc[:,0] == hap] hap_vars.append([1 if var in trans[\"VAR_ID\"].unique() else 0 for var",
"* self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) <= 1 hap_prob += lpSum(haplotypes[i] *",
"while opt >= (max_opt - float(self.config.LP_PARAMS[\"optimal_decay\"])) and not is_ref and hap_prob.status >= 0:",
"hap_prob.status != 1: break opt = hap_prob.objective.value() new_called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob)",
"self.translation_table.iloc[:,0].unique().tolist()] # List of possible haplotypes def _mod_vcf_record(self, alt: str, ref: str) ->",
"= self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"] == 99].tolist()) haps = self.translation_table[\"Haplotype Name\"] no_match = self.translation_table[self.translation_table[\"MATCH\"] == 0].iloc[:,0]",
"def _get_strand_constraint(self, i: int, default: list) -> list: \"\"\" Helps to assemble the",
"self.phased: for i in range(num_haps): hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, []).size) <= 1",
"row.iloc[8] in [\"insertion\", \"deletion\"]: new_pos = int(row[\"ID\"].split(\"_\")[1]) - 1 ID = f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID' else:",
"if all are homozygous Returns: list: [description] \"\"\" sc = self.translation_table[self.translation_table.iloc[:,0] == self.haplotypes[i]][\"STRAND\"].unique()",
"str, ref: str) -> str: \"\"\" Modifies record from VCF to standardized form",
"= sample_prefix self.genotypes = gene.get_sample_vars(sample_prefix) if len(self.genotypes) == 0: raise NoVariantsException self.translation_table =",
"if matched with a haplotype, MUST be used # Otherwise, variants like CYP2D6*5",
"[] for i in no_match.unique(): if sum([i == k for k in no_match])",
"Set constraint of two haplotypes selected hap_prob += (lpSum(haplotypes[i] for i in range(num_haps))",
"return None, None else: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will not be called\")",
"pd import numpy as np from pulp import * from .gene import AbstractGene",
"(variants[i] <= (lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps)))) # A given variant",
"= [haps[0][0], haps[1][0]] else: called = np.array([np.repeat(i[0], i[1]) for i in haps]).flatten().tolist() if",
"not use this file except in compliance with the License. # You may",
"AbstractGene, sample_prefix: str, config = None) -> None: \"\"\" Create a new haplotype",
"translation table ref to a standardized form Args: var_type (str): insertion, deletion, or",
"self.matched = True matches = self.translation_table.apply(self._match, axis = 1) self.translation_table[\"MATCH\"] = [m[0] for",
"except KeyError: # Not in VCF return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand try: vcf_geno = [self._mod_vcf_record(g,",
"self.translation_table[\"STRAND\"] = [m[1] for m in matches] self.translation_table[\"VAR_ID\"] = self.translation_table.apply( lambda x: f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}',",
"(str): insertion, deletion, or substitution alt (str): allele from translation table Returns: [list]:",
"== 0: break called = new_called return possible_haplotypes, haplotype_variants def _get_strand_constraint(self, i: int,",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"[m[0] for m in matches] self.translation_table[\"STRAND\"] = [m[1] for m in matches] self.translation_table[\"VAR_ID\"]",
"zygosity for i in range(num_vars): # Iterate over every variant # A variant",
"found, {self.sample_prefix} will not be called\") return [], [] else: called, variants, hap_len,",
"len(haps) == 0: called = [self.reference, self.reference] is_ref = True elif len(haps) ==",
"as list based on iupac \"\"\" alt = alt.strip(\"<>\") if var_type == \"insertion\":",
"agreed to in writing, software # distributed under the License is distributed on",
"config = None) -> None: \"\"\" Create a new haplotype object This object",
"CNV variants defined, if matched with a haplotype, MUST be used # Otherwise,",
"variants[i]) # Any CNV variants defined, if matched with a haplotype, MUST be",
"if self.phased: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will be re-attempted with phasing off.\")",
"assemble the constraint for phased data Looks at all strands that are part",
"a particular position) \"\"\" strand = 0 if row.iloc[8] in [\"insertion\", \"deletion\"]: new_pos",
"in matches] self.translation_table[\"VAR_ID\"] = self.translation_table.apply( lambda x: f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}', axis = 1 ) self.translation_table",
"index default (list): default return if nothing matches or if all are homozygous",
"try: vcf_geno = [self._mod_vcf_record(g, genotype[\"ref\"]) for g in genotype[\"alleles\"]] except AttributeError: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]),",
"part of a haplotype Removes homozygous calls Args: i (int): haplotype index default",
"possible_haplotypes.append(tuple(sorted(called))) haplotype_variants.append(tuple(sorted(variants))) hap_prob += lpSum([h.value() * h for h in haplotypes]) <= hap_len",
"return [f's{a}' for a in self.config.IUPAC_CODES[alt]] except KeyError: return [f's{alt}'] def _match(self, row:",
"1 else -1 elif alt_matches == 2 and genotype[\"phased\"]: strand = 3 return",
"is_ref = False haps = [] variants = [] for v in lp_problem.variables():",
"hap_len, is_ref = self._haps_from_prob(hap_prob) if is_ref: possible_haplotypes.append(tuple(called)) haplotype_variants.append(tuple(variants)) return possible_haplotypes, haplotype_variants max_opt =",
"variant hap_prob += (variants[i] <= (lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps)))) #",
"called = [self.reference, self.reference] is_ref = True elif len(haps) == 2: called =",
"permissions and # limitations under the License. import sys import itertools import pandas",
"self.translation_table = gene.get_translation_table_copy() self.chromosome = gene.chromosome self.version = gene.version self.reference = gene.reference def",
"var_type (str): insertion, deletion, or substitution alt (str): allele from translation table Returns:",
"== 1 and genotype[\"phased\"]: strand = 1 if max([vcf_geno.index(a) for a in tt_alt_geno])",
"if hap_prob.status != 1: break opt = hap_prob.objective.value() new_called, variants, hap_len, is_ref =",
"self.haplotypes[i]) & (self.translation_table[\"MATCH\"] > 0) ].shape[0] * haplotypes[i] for i in range(num_haps)) self._solve(hap_prob)",
"[]).size) <= 1 # max one strand hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0]",
"self.reference = gene.reference def table_matcher(self) -> None: \"\"\" Matches variants in the translation",
"len(ref) < len(alt): return f'id{alt[1:]}' # Remove first position else: return f's{alt}' def",
"try: return [f's{a}' for a in self.config.IUPAC_CODES[alt]] except KeyError: return [f's{alt}'] def _match(self,",
"+= (lpSum(haplotypes[i] for i in range(num_haps)) <= int(self.config.LP_PARAMS[\"max_haps\"])) # Cannot choose more than",
"self.translation_table[\"MATCH\"] = [m[0] for m in matches] self.translation_table[\"STRAND\"] = [m[1] for m in",
"self.translation_table[self.translation_table.iloc[:,0] == hap] hap_vars.append([1 if var in trans[\"VAR_ID\"].unique() else 0 for var in",
"to in writing, software # distributed under the License is distributed on an",
"len(self.haplotypes) hap_vars = [] for hap in self.haplotypes: trans = self.translation_table[self.translation_table.iloc[:,0] == hap]",
"implied. # See the License for the specific language governing permissions and #",
"alt (str): allele from translation table Returns: [list]: modified allele as list based",
"False called, variants = self.lp_hap() if len(called) > 1: LOGGING.warning(f\"Multiple genotypes possible for",
"This object is not a subclass, but inherits data from the Gene class",
"# List of possible haplotypes def _mod_vcf_record(self, alt: str, ref: str) -> str:",
"sys import itertools import pandas as pd import numpy as np from pulp",
"def _haps_from_prob(self, lp_problem: object) -> tuple: \"\"\" Take a optimally solved lp problem",
"self.translation_table[self.translation_table.iloc[:,0] == self.haplotypes[i]][\"STRAND\"].unique() sc = np.delete(sc, np.where(sc == [3])) return(sc if sc.size >",
"== 1: called.append(self.reference) return called, variants, len(haps), is_ref def _solve(self, hap_prob: object) ->",
"k in no_match]) > 0: drops.append(i) self.translation_table = self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] # Drop haplotypes that",
"\"deletion\": return [f'id-'] else: try: return [f's{a}' for a in self.config.IUPAC_CODES[alt]] except KeyError:",
"= 1) self.translation_table[\"MATCH\"] = [m[0] for m in matches] self.translation_table[\"STRAND\"] = [m[1] for",
"if self.solver == \"GLPK\": hap_prob.solve(GLPK(msg=0)) else: hap_prob.solve(PULP_CBC_CMD(msg=0)) def lp_hap(self) -> tuple: \"\"\" Build",
"variants \"\"\" self.matched = True matches = self.translation_table.apply(self._match, axis = 1) self.translation_table[\"MATCH\"] =",
"Apache License, Version 2.0 (the \"License\"); # you may not use this file",
"f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}', axis = 1 ) self.translation_table = self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"] == 99].tolist()) haps = self.translation_table[\"Haplotype",
"self.variants.iloc[i,1]) if self.phased: for i in range(num_haps): hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, []).size)",
"return f'id{alt[1:]}' # Remove first position else: return f's{alt}' def _mod_tt_record(self, var_type: str,",
"\"\"\" Matches variants in the translation table with the subject's variants \"\"\" self.matched",
"# See the License for the specific language governing permissions and # limitations",
"the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"matched variants self.haplotypes = [hap for hap in self.translation_table.iloc[:,0].unique().tolist()] # List of possible",
"Limit alleles that can be chosen based on zygosity for i in range(num_vars):",
"lpSum([h.value() * h for h in haplotypes]) <= hap_len - 1 self._solve(hap_prob) if",
"lp_problem.variables(): if v.varValue: if v.varValue > 0: if v.name.split(\"_\")[0] == f'c{self.chromosome}': variants.append(v.name) else:",
"= True elif len(haps) == 2: called = [haps[0][0], haps[1][0]] else: called =",
"1 ) self.translation_table = self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"] == 99].tolist()) haps = self.translation_table[\"Haplotype Name\"] no_match =",
"genotype[\"phased\"]: strand = 3 return alt_matches, strand def _haps_from_prob(self, lp_problem: object) -> tuple:",
"the Apache License, Version 2.0 (the \"License\"); # you may not use this",
"you may not use this file except in compliance with the License. #",
"in range(num_vars): # Iterate over every variant # A variant allele can only",
"\"\"\" Create a new haplotype object This object is not a subclass, but",
"[\"insertion\", \"deletion\"]: new_pos = int(row[\"ID\"].split(\"_\")[1]) - 1 ID = f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID' else: ID =",
"self.variants = self.translation_table.loc[:,[\"VAR_ID\", \"MATCH\", \"STRAND\", \"Type\", \"Variant Start\"]].drop_duplicates() # List of matched variants",
"self.variants[\"VAR_ID\"]]) hap_prob = LpProblem(\"Haplotype Optimization\", LpMaximize) # Define the haplotypes and variants variables",
"= hap_prob.objective.value() opt = max_opt while opt >= (max_opt - float(self.config.LP_PARAMS[\"optimal_decay\"])) and not",
"None: \"\"\" Matches variants in the translation table with the subject's variants \"\"\"",
"called = np.array([np.repeat(i[0], i[1]) for i in haps]).flatten().tolist() if len(called) == 1: called.append(self.reference)",
"variant not matching drops = [] for i in no_match.unique(): if sum([i ==",
"v.varValue)) if len(haps) == 0: called = [self.reference, self.reference] is_ref = True elif",
"i (int): haplotype index default (list): default return if nothing matches or if",
"don't match 100% self.variants = self.translation_table.loc[:,[\"VAR_ID\", \"MATCH\", \"STRAND\", \"Type\", \"Variant Start\"]].drop_duplicates() # List",
"* h for h in haplotypes]) <= hap_len - 1 self._solve(hap_prob) if hap_prob.status",
"tuple: \"\"\" Build and run the LP problem Returns: tuple: list of possible",
"tuple: called haplotypes and associated information \"\"\" is_ref = False haps = []",
"phased call attempt fails self.phased = False called, variants = self.lp_hap() if len(called)",
"self.matched = False self.sample_prefix = sample_prefix self.genotypes = gene.get_sample_vars(sample_prefix) if len(self.genotypes) == 0:",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may",
"self.translation_table[\"Haplotype Name\"] no_match = self.translation_table[self.translation_table[\"MATCH\"] == 0].iloc[:,0] # Haplotypes where there is any",
"float(self.config.LP_PARAMS[\"optimal_decay\"])) and not is_ref and hap_prob.status >= 0: possible_haplotypes.append(tuple(sorted(called))) haplotype_variants.append(tuple(sorted(variants))) hap_prob += lpSum([h.value()",
"with a sample Args: row (pd.core.series.Series): single row from translation table genotypes ([type]):",
"import numpy as np from pulp import * from .gene import AbstractGene from",
"# max one strand hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in",
"\"Binary\") for var in self.variants[\"VAR_ID\"]] # Set constraint of two haplotypes selected hap_prob",
"else: try: return [f's{a}' for a in self.config.IUPAC_CODES[alt]] except KeyError: return [f's{alt}'] def",
"a phased call attempt fails self.phased = False called, variants = self.lp_hap() if",
"len(haps) == 2: called = [haps[0][0], haps[1][0]] else: called = np.array([np.repeat(i[0], i[1]) for",
"tuple: list of possible haplotypes and list of associated variants \"\"\" possible_haplotypes =",
"gene.get_translation_table_copy() self.chromosome = gene.chromosome self.version = gene.version self.reference = gene.reference def table_matcher(self) ->",
"will be re-attempted with phasing off.\") return None, None else: LOGGING.warning(f\"No feasible solution",
"cannot be used more than \"MATCH\" hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k",
"variants variables haplotypes = [LpVariable(hap, cat = \"Integer\", lowBound=0, upBound=2) for hap in",
"limitations under the License. import sys import itertools import pandas as pd import",
"at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in",
"the subject and haplotype classes. Args: gene (Gene): gene.Gene object sample_prefix (str): Sample",
"in self.variants[\"VAR_ID\"]]) hap_prob = LpProblem(\"Haplotype Optimization\", LpMaximize) # Define the haplotypes and variants",
"not be called\") return [], [] else: called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob)",
"tt_alt_geno]) if alt_matches == 1 and genotype[\"phased\"]: strand = 1 if max([vcf_geno.index(a) for",
"and # limitations under the License. import sys import itertools import pandas as",
"(corresponds to the number of matched alleles for a particular position) \"\"\" strand",
"where there is any variant not matching drops = [] for i in",
"in no_match.unique(): if sum([i == k for k in no_match]) > 0: drops.append(i)",
"[list]: modified allele as list based on iupac \"\"\" alt = alt.strip(\"<>\") if",
"== 2 and genotype[\"phased\"]: strand = 3 return alt_matches, strand def _haps_from_prob(self, lp_problem:",
"the subject's variants \"\"\" self.matched = True matches = self.translation_table.apply(self._match, axis = 1)",
"[] for hap in self.haplotypes: trans = self.translation_table[self.translation_table.iloc[:,0] == hap] hap_vars.append([1 if var",
"License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0",
"str: reformatted alt allele \"\"\" if alt is None: return \"-\" if \"<\"",
"new haplotype object This object is not a subclass, but inherits data from",
"self.reference] is_ref = True elif len(haps) == 2: called = [haps[0][0], haps[1][0]] else:",
"in a single translation table row with a sample Args: row (pd.core.series.Series): single",
"if row.iloc[8] in [\"insertion\", \"deletion\"]: new_pos = int(row[\"ID\"].split(\"_\")[1]) - 1 ID = f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID'",
"Evaluate match in a single translation table row with a sample Args: row",
"return [f's{alt}'] def _match(self, row: pd.core.series.Series) -> (int, int): \"\"\" Evaluate match in",
"or substitution alt (str): allele from translation table Returns: [list]: modified allele as",
"i in range(num_haps)) <= int(self.config.LP_PARAMS[\"max_haps\"])) # Cannot choose more than x haplotypes (may",
"self.solver == \"GLPK\": hap_prob.solve(GLPK(msg=0)) else: hap_prob.solve(PULP_CBC_CMD(msg=0)) def lp_hap(self) -> tuple: \"\"\" Build and",
"standardized form Args: var_type (str): insertion, deletion, or substitution alt (str): allele from",
"Name\"] no_match = self.translation_table[self.translation_table[\"MATCH\"] == 0].iloc[:,0] # Haplotypes where there is any variant",
"from pulp import * from .gene import AbstractGene from . import LOGGING class",
"feasible solution found, {self.sample_prefix} will not be called\") return [], [] else: called,",
"\"\"\" Solve for the most likely diplotype Returns: (): Results \"\"\" if not",
"haplotypes and associated information \"\"\" is_ref = False haps = [] variants =",
"LOGGING class NoVariantsException(Exception): \"\"\" Exception to call if a sample is attempted that",
"most likely diplotype Returns: (): Results \"\"\" if not self.matched: print(\"You need to",
"default (list): default return if nothing matches or if all are homozygous Returns:",
"range(num_haps): hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, []).size) <= 1 # max one strand",
"in matches] self.translation_table[\"STRAND\"] = [m[1] for m in matches] self.translation_table[\"VAR_ID\"] = self.translation_table.apply( lambda",
"\"\"\" if alt is None: return \"-\" if \"<\" in alt: return f\"s{alt.strip('<>')}\"",
"used by the subject and haplotype classes. Args: gene (Gene): gene.Gene object sample_prefix",
"0: drops.append(i) self.translation_table = self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] # Drop haplotypes that don't match 100% self.variants",
"_mod_tt_record(self, var_type: str, alt: str) -> list: \"\"\" Modifies the translation table ref",
"_mod_vcf_record(self, alt: str, ref: str) -> str: \"\"\" Modifies record from VCF to",
"haplotypes = [LpVariable(hap, cat = \"Integer\", lowBound=0, upBound=2) for hap in self.haplotypes] variants",
">= 0: possible_haplotypes.append(tuple(sorted(called))) haplotype_variants.append(tuple(sorted(variants))) hap_prob += lpSum([h.value() * h for h in haplotypes])",
"if self.phased: for i in range(num_haps): hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, []).size) <=",
"> 0: if v.name.split(\"_\")[0] == f'c{self.chromosome}': variants.append(v.name) else: haps.append((v.name, v.varValue)) if len(haps) ==",
"\"\"\" self.phased = gene.phased self.config = config self.solver = gene.solver self.matched = False",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"Args: gene (Gene): gene.Gene object sample_prefix (str): Sample ID \"\"\" self.phased = gene.phased",
"gene.reference def table_matcher(self) -> None: \"\"\" Matches variants in the translation table with",
"if nothing matches or if all are homozygous Returns: list: [description] \"\"\" sc",
"Looks at all strands that are part of a haplotype Removes homozygous calls",
"None else: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will not be called\") return [],",
"2 and genotype[\"phased\"]: strand = 3 return alt_matches, strand def _haps_from_prob(self, lp_problem: object)",
"0, 1, or 2 (corresponds to the number of matched alleles for a",
"in range(num_haps)) <= 1 hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in",
"1 if max([vcf_geno.index(a) for a in tt_alt_geno]) == 1 else -1 elif alt_matches",
"See the License for the specific language governing permissions and # limitations under",
"tuple: \"\"\" Take a optimally solved lp problem Produce called haplotypes Args: lp_problem",
"homozygous Returns: list: [description] \"\"\" sc = self.translation_table[self.translation_table.iloc[:,0] == self.haplotypes[i]][\"STRAND\"].unique() sc = np.delete(sc,",
"[0])[0] for i in range(num_haps)) <= 1 hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0]",
"VCF return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand try: vcf_geno = [self._mod_vcf_record(g, genotype[\"ref\"]) for g in genotype[\"alleles\"]]",
"Returns: list: [description] \"\"\" sc = self.translation_table[self.translation_table.iloc[:,0] == self.haplotypes[i]][\"STRAND\"].unique() sc = np.delete(sc, np.where(sc",
"alt_matches == 2 and genotype[\"phased\"]: strand = 3 return alt_matches, strand def _haps_from_prob(self,",
"if v.varValue: if v.varValue > 0: if v.name.split(\"_\")[0] == f'c{self.chromosome}': variants.append(v.name) else: haps.append((v.name,",
"single row from translation table genotypes ([type]): list of genotypes Returns: int: 99",
"class that has meta information used by the subject and haplotype classes. Args:",
"0 if row.iloc[8] in [\"insertion\", \"deletion\"]: new_pos = int(row[\"ID\"].split(\"_\")[1]) - 1 ID =",
"sub-alleles) # Limit alleles that can be chosen based on zygosity for i",
"v.name.split(\"_\")[0] == f'c{self.chromosome}': variants.append(v.name) else: haps.append((v.name, v.varValue)) if len(haps) == 0: called =",
"self.translation_table[ (self.translation_table.iloc[:,0] == self.haplotypes[i]) & (self.translation_table[\"MATCH\"] > 0) ].shape[0] * haplotypes[i] for i",
"Build and run the LP problem Returns: tuple: list of possible haplotypes and",
"the most likely diplotype Returns: (): Results \"\"\" if not self.matched: print(\"You need",
"License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to",
"a haplotype Removes homozygous calls Args: i (int): haplotype index default (list): default",
"under the License. import sys import itertools import pandas as pd import numpy",
"alt_matches = sum([vcf_geno.count(a) for a in tt_alt_geno]) if alt_matches == 1 and genotype[\"phased\"]:",
"matched alleles for a particular position) \"\"\" strand = 0 if row.iloc[8] in",
"associated information \"\"\" is_ref = False haps = [] variants = [] for",
"Removes homozygous calls Args: i (int): haplotype index default (list): default return if",
"= self.translation_table[\"Haplotype Name\"] no_match = self.translation_table[self.translation_table[\"MATCH\"] == 0].iloc[:,0] # Haplotypes where there is",
"object) -> tuple: \"\"\" Take a optimally solved lp problem Produce called haplotypes",
"haplotype index default (list): default return if nothing matches or if all are",
"genotype[\"phased\"]: strand = 1 if max([vcf_geno.index(a) for a in tt_alt_geno]) == 1 else",
"h in haplotypes]) <= hap_len - 1 self._solve(hap_prob) if hap_prob.status != 1: break",
"variants = self.lp_hap() if called is None: # Happens when a phased call",
"for i in range(num_haps): hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, []).size) <= 1 #",
"# You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 #",
"hap_prob.objective.value() new_called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if new_called == called or len(new_called)",
"1 # max one strand hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i",
"only be used once per haplotype, up to two alleles per variant hap_prob",
"return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand try: vcf_geno = [self._mod_vcf_record(g, genotype[\"ref\"]) for g in genotype[\"alleles\"]] except",
"table_matcher function with genotyped before you can optimize\") sys.exit(1) called, variants = self.lp_hap()",
"if var_type == \"insertion\": return [f'id{alt}'] elif var_type == \"deletion\": return [f'id-'] else:",
"the License. import sys import itertools import pandas as pd import numpy as",
"strand def _haps_from_prob(self, lp_problem: object) -> tuple: \"\"\" Take a optimally solved lp",
"Modifies the translation table ref to a standardized form Args: var_type (str): insertion,",
"self.matched: print(\"You need to run the table_matcher function with genotyped before you can",
"from translation table genotypes ([type]): list of genotypes Returns: int: 99 (missing), 0,",
"defined, if matched with a haplotype, MUST be used # Otherwise, variants like",
"self.translation_table[self.translation_table[\"MATCH\"] == 0].iloc[:,0] # Haplotypes where there is any variant not matching drops",
"self.haplotypes: trans = self.translation_table[self.translation_table.iloc[:,0] == hap] hap_vars.append([1 if var in trans[\"VAR_ID\"].unique() else 0",
"0) ].shape[0] * haplotypes[i] for i in range(num_haps)) self._solve(hap_prob) if hap_prob.status != 1:",
"= len(self.haplotypes) hap_vars = [] for hap in self.haplotypes: trans = self.translation_table[self.translation_table.iloc[:,0] ==",
"Args: lp_problem (object): solved lp problem Returns: tuple: called haplotypes and associated information",
"called = [haps[0][0], haps[1][0]] else: called = np.array([np.repeat(i[0], i[1]) for i in haps]).flatten().tolist()",
"# Limit alleles that can be chosen based on zygosity for i in",
"KIND, either express or implied. # See the License for the specific language",
"self.lp_hap() if called is None: # Happens when a phased call attempt fails",
"LpMaximize) # Define the haplotypes and variants variables haplotypes = [LpVariable(hap, cat =",
"a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable",
"= None) -> None: \"\"\" Create a new haplotype object This object is",
"maximize the number of variant alleles used hap_prob += lpSum( self.translation_table[ (self.translation_table.iloc[:,0] ==",
"1: break opt = hap_prob.objective.value() new_called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if new_called",
"haplotype Removes homozygous calls Args: i (int): haplotype index default (list): default return",
"phasing off.\") return None, None else: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will not",
"alleles used hap_prob += lpSum( self.translation_table[ (self.translation_table.iloc[:,0] == self.haplotypes[i]) & (self.translation_table[\"MATCH\"] > 0)",
"hap in self.haplotypes: trans = self.translation_table[self.translation_table.iloc[:,0] == hap] hap_vars.append([1 if var in trans[\"VAR_ID\"].unique()",
"if self.variants.iloc[i,3] == \"CNV\": hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps)))",
"# Set to maximize the number of variant alleles used hap_prob += lpSum(",
"ANY KIND, either express or implied. # See the License for the specific",
"def lp_hap(self) -> tuple: \"\"\" Build and run the LP problem Returns: tuple:",
"Modifies record from VCF to standardized form Args: alt (str): alt allele ref",
"1 ID = f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID' else: ID = row[\"ID\"] try: genotype = self.genotypes[ID] except",
"if \"<\" in alt: return f\"s{alt.strip('<>')}\" elif len(ref) > len(alt): return \"id-\" elif",
"possible_haplotypes.append(tuple(called)) haplotype_variants.append(tuple(variants)) return possible_haplotypes, haplotype_variants max_opt = hap_prob.objective.value() opt = max_opt while opt",
"form Args: alt (str): alt allele ref (str): ref allele Returns: str: reformatted",
"\"CNV\": hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) == self.variants.iloc[i,1]) if",
"str) -> list: \"\"\" Modifies the translation table ref to a standardized form",
"variants = self.lp_hap() if len(called) > 1: LOGGING.warning(f\"Multiple genotypes possible for {self.sample_prefix}.\") return",
"self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] # Drop haplotypes that don't match 100% self.variants = self.translation_table.loc[:,[\"VAR_ID\", \"MATCH\", \"STRAND\",",
"return \"id-\" elif len(ref) < len(alt): return f'id{alt[1:]}' # Remove first position else:",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See",
"from the Gene class Conceptually, Gene is a fairly abstract class that has",
"= [] num_vars = self.variants.shape[0] num_haps = len(self.haplotypes) hap_vars = [] for hap",
"gene.chromosome self.version = gene.version self.reference = gene.reference def table_matcher(self) -> None: \"\"\" Matches",
"hap_len, is_ref = self._haps_from_prob(hap_prob) if new_called == called or len(new_called) == 0: break",
"you can optimize\") sys.exit(1) called, variants = self.lp_hap() if called is None: #",
"called = new_called return possible_haplotypes, haplotype_variants def _get_strand_constraint(self, i: int, default: list) ->",
"LpProblem(\"Haplotype Optimization\", LpMaximize) # Define the haplotypes and variants variables haplotypes = [LpVariable(hap,",
"True matches = self.translation_table.apply(self._match, axis = 1) self.translation_table[\"MATCH\"] = [m[0] for m in",
">= (max_opt - float(self.config.LP_PARAMS[\"optimal_decay\"])) and not is_ref and hap_prob.status >= 0: possible_haplotypes.append(tuple(sorted(called))) haplotype_variants.append(tuple(sorted(variants)))",
"any variant not matching drops = [] for i in no_match.unique(): if sum([i",
"= self.translation_table[self.translation_table[\"MATCH\"] == 0].iloc[:,0] # Haplotypes where there is any variant not matching",
"a subclass, but inherits data from the Gene class Conceptually, Gene is a",
"over every variant # A variant allele can only be used once per",
"fails self.phased = False called, variants = self.lp_hap() if len(called) > 1: LOGGING.warning(f\"Multiple",
"a optimally solved lp problem Produce called haplotypes Args: lp_problem (object): solved lp",
"k for k in no_match]) > 0: drops.append(i) self.translation_table = self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] # Drop",
"of genotypes Returns: int: 99 (missing), 0, 1, or 2 (corresponds to the",
"problem Returns: tuple: called haplotypes and associated information \"\"\" is_ref = False haps",
"(str): allele from translation table Returns: [list]: modified allele as list based on",
"None) -> None: \"\"\" Create a new haplotype object This object is not",
"hap_prob += lpSum( self.translation_table[ (self.translation_table.iloc[:,0] == self.haplotypes[i]) & (self.translation_table[\"MATCH\"] > 0) ].shape[0] *",
"== hap] hap_vars.append([1 if var in trans[\"VAR_ID\"].unique() else 0 for var in self.variants[\"VAR_ID\"]])",
"that can be chosen based on zygosity for i in range(num_vars): # Iterate",
"opt = hap_prob.objective.value() new_called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if new_called == called",
"haplotype_variants.append(tuple(sorted(variants))) hap_prob += lpSum([h.value() * h for h in haplotypes]) <= hap_len -",
"matches] self.translation_table[\"STRAND\"] = [m[1] for m in matches] self.translation_table[\"VAR_ID\"] = self.translation_table.apply( lambda x:",
"under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"haps]).flatten().tolist() if len(called) == 1: called.append(self.reference) return called, variants, len(haps), is_ref def _solve(self,",
"+= lpSum([h.value() * h for h in haplotypes]) <= hap_len - 1 self._solve(hap_prob)",
"!= 1: if self.phased: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will be re-attempted with",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"that don't match 100% self.variants = self.translation_table.loc[:,[\"VAR_ID\", \"MATCH\", \"STRAND\", \"Type\", \"Variant Start\"]].drop_duplicates() #",
"applicable law or agreed to in writing, software # distributed under the License",
"allele can only be used once per haplotype, up to two alleles per",
"= self.translation_table[self.translation_table.iloc[:,0] == self.haplotypes[i]][\"STRAND\"].unique() sc = np.delete(sc, np.where(sc == [3])) return(sc if sc.size",
"genotypes Returns: int: 99 (missing), 0, 1, or 2 (corresponds to the number",
"CONDITIONS OF ANY KIND, either express or implied. # See the License for",
"AttributeError: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand if vcf_geno == [\"-\", \"-\"]: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand tt_alt_geno",
"the constraint for phased data Looks at all strands that are part of",
"as pd import numpy as np from pulp import * from .gene import",
"int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand tt_alt_geno = self._mod_tt_record(row.iloc[8], row.iloc[7]) alt_matches = sum([vcf_geno.count(a) for a in tt_alt_geno])",
"alleles for a particular position) \"\"\" strand = 0 if row.iloc[8] in [\"insertion\",",
"i in range(num_haps)) self._solve(hap_prob) if hap_prob.status != 1: if self.phased: LOGGING.warning(f\"No feasible solution",
"a standardized form Args: var_type (str): insertion, deletion, or substitution alt (str): allele",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"== 0: called = [self.reference, self.reference] is_ref = True elif len(haps) == 2:",
"constraint for phased data Looks at all strands that are part of a",
"to maximize the number of variant alleles used hap_prob += lpSum( self.translation_table[ (self.translation_table.iloc[:,0]",
"lp_hap(self) -> tuple: \"\"\" Build and run the LP problem Returns: tuple: list",
"1, or 2 (corresponds to the number of matched alleles for a particular",
"1 self._solve(hap_prob) if hap_prob.status != 1: break opt = hap_prob.objective.value() new_called, variants, hap_len,",
"compliance with the License. # You may obtain a copy of the License",
"two haplotypes selected hap_prob += (lpSum(haplotypes[i] for i in range(num_haps)) <= int(self.config.LP_PARAMS[\"max_haps\"])) #",
"sample Args: row (pd.core.series.Series): single row from translation table genotypes ([type]): list of",
"problem Produce called haplotypes Args: lp_problem (object): solved lp problem Returns: tuple: called",
"max one strand hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps))",
"not is_ref and hap_prob.status >= 0: possible_haplotypes.append(tuple(sorted(called))) haplotype_variants.append(tuple(sorted(variants))) hap_prob += lpSum([h.value() * h",
"\"\"\" Build and run the LP problem Returns: tuple: list of possible haplotypes",
"for the specific language governing permissions and # limitations under the License. import",
"is a fairly abstract class that has meta information used by the subject",
"before you can optimize\") sys.exit(1) called, variants = self.lp_hap() if called is None:",
"hap_prob += (lpSum(haplotypes[i] for i in range(num_haps)) <= int(self.config.LP_PARAMS[\"max_haps\"])) # Cannot choose more",
"than \"MATCH\" hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) <= self.variants.iloc[i,1]",
"alleles per variant hap_prob += (variants[i] <= (lpSum(hap_vars[k][i] * haplotypes[k] for k in",
"= self._haps_from_prob(hap_prob) if is_ref: possible_haplotypes.append(tuple(called)) haplotype_variants.append(tuple(variants)) return possible_haplotypes, haplotype_variants max_opt = hap_prob.objective.value() opt",
"* self._get_strand_constraint(i, []).size) <= 1 # max one strand hap_prob += lpSum(haplotypes[i] *",
"not a subclass, but inherits data from the Gene class Conceptually, Gene is",
"for i in range(num_haps)) self._solve(hap_prob) if hap_prob.status != 1: if self.phased: LOGGING.warning(f\"No feasible",
"hap in self.translation_table.iloc[:,0].unique().tolist()] # List of possible haplotypes def _mod_vcf_record(self, alt: str, ref:",
"allele \"\"\" if alt is None: return \"-\" if \"<\" in alt: return",
"else -1 elif alt_matches == 2 and genotype[\"phased\"]: strand = 3 return alt_matches,",
"def _solve(self, hap_prob: object) -> object: if self.solver == \"GLPK\": hap_prob.solve(GLPK(msg=0)) else: hap_prob.solve(PULP_CBC_CMD(msg=0))",
"hap_vars = [] for hap in self.haplotypes: trans = self.translation_table[self.translation_table.iloc[:,0] == hap] hap_vars.append([1",
"table with the subject's variants \"\"\" self.matched = True matches = self.translation_table.apply(self._match, axis",
"sample_prefix: str, config = None) -> None: \"\"\" Create a new haplotype object",
"(max_opt - float(self.config.LP_PARAMS[\"optimal_decay\"])) and not is_ref and hap_prob.status >= 0: possible_haplotypes.append(tuple(sorted(called))) haplotype_variants.append(tuple(sorted(variants))) hap_prob",
"feasible solution found, {self.sample_prefix} will be re-attempted with phasing off.\") return None, None",
"if len(haps) == 0: called = [self.reference, self.reference] is_ref = True elif len(haps)",
"copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law",
"0].iloc[:,0] # Haplotypes where there is any variant not matching drops = []",
"optimize_hap(self) -> (): \"\"\" Solve for the most likely diplotype Returns: (): Results",
"data from the Gene class Conceptually, Gene is a fairly abstract class that",
"(the \"License\"); # you may not use this file except in compliance with",
"* from .gene import AbstractGene from . import LOGGING class NoVariantsException(Exception): \"\"\" Exception",
"Optimization\", LpMaximize) # Define the haplotypes and variants variables haplotypes = [LpVariable(hap, cat",
"# Unless required by applicable law or agreed to in writing, software #",
"if var in trans[\"VAR_ID\"].unique() else 0 for var in self.variants[\"VAR_ID\"]]) hap_prob = LpProblem(\"Haplotype",
"by applicable law or agreed to in writing, software # distributed under the",
"table Returns: [list]: modified allele as list based on iupac \"\"\" alt =",
"hap_prob.solve(GLPK(msg=0)) else: hap_prob.solve(PULP_CBC_CMD(msg=0)) def lp_hap(self) -> tuple: \"\"\" Build and run the LP",
"gene.get_sample_vars(sample_prefix) if len(self.genotypes) == 0: raise NoVariantsException self.translation_table = gene.get_translation_table_copy() self.chromosome = gene.chromosome",
"once per haplotype, up to two alleles per variant hap_prob += (variants[i] <=",
"range(num_haps)) self._solve(hap_prob) if hap_prob.status != 1: if self.phased: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix}",
"lambda x: f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}', axis = 1 ) self.translation_table = self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"] == 99].tolist()) haps",
"i in no_match.unique(): if sum([i == k for k in no_match]) > 0:",
"raise NoVariantsException self.translation_table = gene.get_translation_table_copy() self.chromosome = gene.chromosome self.version = gene.version self.reference =",
"var_type: str, alt: str) -> list: \"\"\" Modifies the translation table ref to",
"> 0) ].shape[0] * haplotypes[i] for i in range(num_haps)) self._solve(hap_prob) if hap_prob.status !=",
"can optimize\") sys.exit(1) called, variants = self.lp_hap() if called is None: # Happens",
"file except in compliance with the License. # You may obtain a copy",
"list based on iupac \"\"\" alt = alt.strip(\"<>\") if var_type == \"insertion\": return",
"[hap for hap in self.translation_table.iloc[:,0].unique().tolist()] # List of possible haplotypes def _mod_vcf_record(self, alt:",
"hap_vars.append([1 if var in trans[\"VAR_ID\"].unique() else 0 for var in self.variants[\"VAR_ID\"]]) hap_prob =",
"- 1 ID = f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID' else: ID = row[\"ID\"] try: genotype = self.genotypes[ID]",
"Iterate over every variant # A variant allele can only be used once",
"is_ref def _solve(self, hap_prob: object) -> object: if self.solver == \"GLPK\": hap_prob.solve(GLPK(msg=0)) else:",
"== 1 else -1 elif alt_matches == 2 and genotype[\"phased\"]: strand = 3",
"match 100% self.variants = self.translation_table.loc[:,[\"VAR_ID\", \"MATCH\", \"STRAND\", \"Type\", \"Variant Start\"]].drop_duplicates() # List of",
"config self.solver = gene.solver self.matched = False self.sample_prefix = sample_prefix self.genotypes = gene.get_sample_vars(sample_prefix)",
"self.haplotypes] variants = [LpVariable(var, cat = \"Binary\") for var in self.variants[\"VAR_ID\"]] # Set",
"from .gene import AbstractGene from . import LOGGING class NoVariantsException(Exception): \"\"\" Exception to",
"[] for v in lp_problem.variables(): if v.varValue: if v.varValue > 0: if v.name.split(\"_\")[0]",
"the translation table with the subject's variants \"\"\" self.matched = True matches =",
"hap in self.haplotypes] variants = [LpVariable(var, cat = \"Binary\") for var in self.variants[\"VAR_ID\"]]",
"VCF to standardized form Args: alt (str): alt allele ref (str): ref allele",
"np from pulp import * from .gene import AbstractGene from . import LOGGING",
"in [\"insertion\", \"deletion\"]: new_pos = int(row[\"ID\"].split(\"_\")[1]) - 1 ID = f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID' else: ID",
"self._haps_from_prob(hap_prob) if is_ref: possible_haplotypes.append(tuple(called)) haplotype_variants.append(tuple(variants)) return possible_haplotypes, haplotype_variants max_opt = hap_prob.objective.value() opt =",
"OR CONDITIONS OF ANY KIND, either express or implied. # See the License",
"h for h in haplotypes]) <= hap_len - 1 self._solve(hap_prob) if hap_prob.status !=",
"axis = 1 ) self.translation_table = self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"] == 99].tolist()) haps = self.translation_table[\"Haplotype Name\"]",
"str, alt: str) -> list: \"\"\" Modifies the translation table ref to a",
"as np from pulp import * from .gene import AbstractGene from . import",
"return possible_haplotypes, haplotype_variants def _get_strand_constraint(self, i: int, default: list) -> list: \"\"\" Helps",
"gene: AbstractGene, sample_prefix: str, config = None) -> None: \"\"\" Create a new",
"for m in matches] self.translation_table[\"STRAND\"] = [m[1] for m in matches] self.translation_table[\"VAR_ID\"] =",
"called, variants = self.lp_hap() if called is None: # Happens when a phased",
". import LOGGING class NoVariantsException(Exception): \"\"\" Exception to call if a sample is",
"gene (Gene): gene.Gene object sample_prefix (str): Sample ID \"\"\" self.phased = gene.phased self.config",
"\"\"\" strand = 0 if row.iloc[8] in [\"insertion\", \"deletion\"]: new_pos = int(row[\"ID\"].split(\"_\")[1]) -",
"# Copyright 2021 <NAME> # Licensed under the Apache License, Version 2.0 (the",
"first position else: return f's{alt}' def _mod_tt_record(self, var_type: str, alt: str) -> list:",
"if max([vcf_geno.index(a) for a in tt_alt_geno]) == 1 else -1 elif alt_matches ==",
"haps[1][0]] else: called = np.array([np.repeat(i[0], i[1]) for i in haps]).flatten().tolist() if len(called) ==",
"if hap_prob.status != 1: if self.phased: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will be",
"of matched variants self.haplotypes = [hap for hap in self.translation_table.iloc[:,0].unique().tolist()] # List of",
"<= self.variants.iloc[i,1] * variants[i]) # Any CNV variants defined, if matched with a",
"= self.lp_hap() if called is None: # Happens when a phased call attempt",
"\"\"\" alt = alt.strip(\"<>\") if var_type == \"insertion\": return [f'id{alt}'] elif var_type ==",
"if sc.size > 0 else np.array(default)) def optimize_hap(self) -> (): \"\"\" Solve for",
"= hap_prob.objective.value() new_called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if new_called == called or",
"if is_ref: possible_haplotypes.append(tuple(called)) haplotype_variants.append(tuple(variants)) return possible_haplotypes, haplotype_variants max_opt = hap_prob.objective.value() opt = max_opt",
"the Gene class Conceptually, Gene is a fairly abstract class that has meta",
"in the translation table with the subject's variants \"\"\" self.matched = True matches",
"governing permissions and # limitations under the License. import sys import itertools import",
"len(ref) > len(alt): return \"id-\" elif len(ref) < len(alt): return f'id{alt[1:]}' # Remove",
"-> object: if self.solver == \"GLPK\": hap_prob.solve(GLPK(msg=0)) else: hap_prob.solve(PULP_CBC_CMD(msg=0)) def lp_hap(self) -> tuple:",
"== \"deletion\": return [f'id-'] else: try: return [f's{a}' for a in self.config.IUPAC_CODES[alt]] except",
"or len(new_called) == 0: break called = new_called return possible_haplotypes, haplotype_variants def _get_strand_constraint(self,",
"selected hap_prob += (lpSum(haplotypes[i] for i in range(num_haps)) <= int(self.config.LP_PARAMS[\"max_haps\"])) # Cannot choose",
"from translation table Returns: [list]: modified allele as list based on iupac \"\"\"",
"= [] for v in lp_problem.variables(): if v.varValue: if v.varValue > 0: if",
"self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) <= 1 hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i,",
"for v in lp_problem.variables(): if v.varValue: if v.varValue > 0: if v.name.split(\"_\")[0] ==",
"lp_problem (object): solved lp problem Returns: tuple: called haplotypes and associated information \"\"\"",
"of variant alleles used hap_prob += lpSum( self.translation_table[ (self.translation_table.iloc[:,0] == self.haplotypes[i]) & (self.translation_table[\"MATCH\"]",
"run the table_matcher function with genotyped before you can optimize\") sys.exit(1) called, variants",
"Args: var_type (str): insertion, deletion, or substitution alt (str): allele from translation table",
"optimally solved lp problem Produce called haplotypes Args: lp_problem (object): solved lp problem",
"0: raise NoVariantsException self.translation_table = gene.get_translation_table_copy() self.chromosome = gene.chromosome self.version = gene.version self.reference",
"in alt: return f\"s{alt.strip('<>')}\" elif len(ref) > len(alt): return \"id-\" elif len(ref) <",
"in tt_alt_geno]) if alt_matches == 1 and genotype[\"phased\"]: strand = 1 if max([vcf_geno.index(a)",
"int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand try: vcf_geno = [self._mod_vcf_record(g, genotype[\"ref\"]) for g in genotype[\"alleles\"]] except AttributeError:",
"[f's{a}' for a in self.config.IUPAC_CODES[alt]] except KeyError: return [f's{alt}'] def _match(self, row: pd.core.series.Series)",
"self._solve(hap_prob) if hap_prob.status != 1: break opt = hap_prob.objective.value() new_called, variants, hap_len, is_ref",
"Set to maximize the number of variant alleles used hap_prob += lpSum( self.translation_table[",
"(lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps)))) # A given variant cannot be",
"= [] haplotype_variants = [] num_vars = self.variants.shape[0] num_haps = len(self.haplotypes) hap_vars =",
"+= ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) == self.variants.iloc[i,1]) if self.phased: for",
"LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will not be called\") return [], [] else:",
"no_match.unique(): if sum([i == k for k in no_match]) > 0: drops.append(i) self.translation_table",
"Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not",
"strand = 0 if row.iloc[8] in [\"insertion\", \"deletion\"]: new_pos = int(row[\"ID\"].split(\"_\")[1]) - 1",
"variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if new_called == called or len(new_called) == 0:",
"be chosen based on zygosity for i in range(num_vars): # Iterate over every",
"increased to find novel sub-alleles) # Limit alleles that can be chosen based",
"be used # Otherwise, variants like CYP2D6*5 will be missed by the other",
"self.config = config self.solver = gene.solver self.matched = False self.sample_prefix = sample_prefix self.genotypes",
"Matches variants in the translation table with the subject's variants \"\"\" self.matched =",
"based on iupac \"\"\" alt = alt.strip(\"<>\") if var_type == \"insertion\": return [f'id{alt}']",
"= [m[1] for m in matches] self.translation_table[\"VAR_ID\"] = self.translation_table.apply( lambda x: f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}', axis",
"= self._mod_tt_record(row.iloc[8], row.iloc[7]) alt_matches = sum([vcf_geno.count(a) for a in tt_alt_geno]) if alt_matches ==",
"def _match(self, row: pd.core.series.Series) -> (int, int): \"\"\" Evaluate match in a single",
"has meta information used by the subject and haplotype classes. Args: gene (Gene):",
"-> tuple: \"\"\" Take a optimally solved lp problem Produce called haplotypes Args:",
"\"GLPK\": hap_prob.solve(GLPK(msg=0)) else: hap_prob.solve(PULP_CBC_CMD(msg=0)) def lp_hap(self) -> tuple: \"\"\" Build and run the",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"will be missed by the other methods if self.variants.iloc[i,3] == \"CNV\": hap_prob +=",
"Start\"]].drop_duplicates() # List of matched variants self.haplotypes = [hap for hap in self.translation_table.iloc[:,0].unique().tolist()]",
"object sample_prefix (str): Sample ID \"\"\" self.phased = gene.phased self.config = config self.solver",
"[] haplotype_variants = [] num_vars = self.variants.shape[0] num_haps = len(self.haplotypes) hap_vars = []",
"= [] for i in no_match.unique(): if sum([i == k for k in",
"for var in self.variants[\"VAR_ID\"]] # Set constraint of two haplotypes selected hap_prob +=",
"self.variants.iloc[i,1] * variants[i]) # Any CNV variants defined, if matched with a haplotype,",
"matches] self.translation_table[\"VAR_ID\"] = self.translation_table.apply( lambda x: f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}', axis = 1 ) self.translation_table =",
"self._haps_from_prob(hap_prob) if new_called == called or len(new_called) == 0: break called = new_called",
"for k in range(num_haps))) == self.variants.iloc[i,1]) if self.phased: for i in range(num_haps): hap_prob",
"class Conceptually, Gene is a fairly abstract class that has meta information used",
"haplotypes Args: lp_problem (object): solved lp problem Returns: tuple: called haplotypes and associated",
"0 else np.array(default)) def optimize_hap(self) -> (): \"\"\" Solve for the most likely",
"can only be used once per haplotype, up to two alleles per variant",
"def table_matcher(self) -> None: \"\"\" Matches variants in the translation table with the",
"in range(num_haps)))) # A given variant cannot be used more than \"MATCH\" hap_prob",
"trans[\"VAR_ID\"].unique() else 0 for var in self.variants[\"VAR_ID\"]]) hap_prob = LpProblem(\"Haplotype Optimization\", LpMaximize) #",
"False haps = [] variants = [] for v in lp_problem.variables(): if v.varValue:",
"(str): alt allele ref (str): ref allele Returns: str: reformatted alt allele \"\"\"",
"the License for the specific language governing permissions and # limitations under the",
"import pandas as pd import numpy as np from pulp import * from",
"for i in range(num_haps)) >= -1 # Set to maximize the number of",
"(missing), 0, 1, or 2 (corresponds to the number of matched alleles for",
"+= lpSum( self.translation_table[ (self.translation_table.iloc[:,0] == self.haplotypes[i]) & (self.translation_table[\"MATCH\"] > 0) ].shape[0] * haplotypes[i]",
"the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed",
"* variants[i]) # Any CNV variants defined, if matched with a haplotype, MUST",
"ID = f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID' else: ID = row[\"ID\"] try: genotype = self.genotypes[ID] except KeyError:",
"of associated variants \"\"\" possible_haplotypes = [] haplotype_variants = [] num_vars = self.variants.shape[0]",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"return(sc if sc.size > 0 else np.array(default)) def optimize_hap(self) -> (): \"\"\" Solve",
"= max_opt while opt >= (max_opt - float(self.config.LP_PARAMS[\"optimal_decay\"])) and not is_ref and hap_prob.status",
"strand if vcf_geno == [\"-\", \"-\"]: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand tt_alt_geno = self._mod_tt_record(row.iloc[8], row.iloc[7])",
"alleles that can be chosen based on zygosity for i in range(num_vars): #",
"].shape[0] * haplotypes[i] for i in range(num_haps)) self._solve(hap_prob) if hap_prob.status != 1: if",
"variant cannot be used more than \"MATCH\" hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for",
"\"\"\" Evaluate match in a single translation table row with a sample Args:",
"self.phased = False called, variants = self.lp_hap() if len(called) > 1: LOGGING.warning(f\"Multiple genotypes",
"for i in no_match.unique(): if sum([i == k for k in no_match]) >",
"try: genotype = self.genotypes[ID] except KeyError: # Not in VCF return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand",
"hap_prob: object) -> object: if self.solver == \"GLPK\": hap_prob.solve(GLPK(msg=0)) else: hap_prob.solve(PULP_CBC_CMD(msg=0)) def lp_hap(self)",
"Otherwise, variants like CYP2D6*5 will be missed by the other methods if self.variants.iloc[i,3]",
"ref: str) -> str: \"\"\" Modifies record from VCF to standardized form Args:",
"in self.haplotypes: trans = self.translation_table[self.translation_table.iloc[:,0] == hap] hap_vars.append([1 if var in trans[\"VAR_ID\"].unique() else",
"on iupac \"\"\" alt = alt.strip(\"<>\") if var_type == \"insertion\": return [f'id{alt}'] elif",
"obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by",
"sc = np.delete(sc, np.where(sc == [3])) return(sc if sc.size > 0 else np.array(default))",
"_solve(self, hap_prob: object) -> object: if self.solver == \"GLPK\": hap_prob.solve(GLPK(msg=0)) else: hap_prob.solve(PULP_CBC_CMD(msg=0)) def",
"-> list: \"\"\" Modifies the translation table ref to a standardized form Args:",
"hap_prob.objective.value() opt = max_opt while opt >= (max_opt - float(self.config.LP_PARAMS[\"optimal_decay\"])) and not is_ref",
"0: called = [self.reference, self.reference] is_ref = True elif len(haps) == 2: called",
"call attempt fails self.phased = False called, variants = self.lp_hap() if len(called) >",
"haplotypes and list of associated variants \"\"\" possible_haplotypes = [] haplotype_variants = []",
"int(self.config.LP_PARAMS[\"max_haps\"])) # Cannot choose more than x haplotypes (may be increased to find",
"lpSum( self.translation_table[ (self.translation_table.iloc[:,0] == self.haplotypes[i]) & (self.translation_table[\"MATCH\"] > 0) ].shape[0] * haplotypes[i] for",
"len(new_called) == 0: break called = new_called return possible_haplotypes, haplotype_variants def _get_strand_constraint(self, i:",
"lp problem Produce called haplotypes Args: lp_problem (object): solved lp problem Returns: tuple:",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"KeyError: return [f's{alt}'] def _match(self, row: pd.core.series.Series) -> (int, int): \"\"\" Evaluate match",
"elif alt_matches == 2 and genotype[\"phased\"]: strand = 3 return alt_matches, strand def",
"new_pos = int(row[\"ID\"].split(\"_\")[1]) - 1 ID = f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID' else: ID = row[\"ID\"] try:",
"allele from translation table Returns: [list]: modified allele as list based on iupac",
"self.version = gene.version self.reference = gene.reference def table_matcher(self) -> None: \"\"\" Matches variants",
"if not self.matched: print(\"You need to run the table_matcher function with genotyped before",
"if called is None: # Happens when a phased call attempt fails self.phased",
"object This object is not a subclass, but inherits data from the Gene",
"# limitations under the License. import sys import itertools import pandas as pd",
"nothing matches or if all are homozygous Returns: list: [description] \"\"\" sc =",
"# Set constraint of two haplotypes selected hap_prob += (lpSum(haplotypes[i] for i in",
"allele Returns: str: reformatted alt allele \"\"\" if alt is None: return \"-\"",
"k in range(num_haps))) <= self.variants.iloc[i,1] * variants[i]) # Any CNV variants defined, if",
"= False called, variants = self.lp_hap() if len(called) > 1: LOGGING.warning(f\"Multiple genotypes possible",
"f'id{alt[1:]}' # Remove first position else: return f's{alt}' def _mod_tt_record(self, var_type: str, alt:",
"alt: str) -> list: \"\"\" Modifies the translation table ref to a standardized",
"range(num_haps)) >= -1 # Set to maximize the number of variant alleles used",
"\"\"\" Modifies record from VCF to standardized form Args: alt (str): alt allele",
"zero variants defined. \"\"\" pass class Haplotype: def __init__(self, gene: AbstractGene, sample_prefix: str,",
"= np.delete(sc, np.where(sc == [3])) return(sc if sc.size > 0 else np.array(default)) def",
"specific language governing permissions and # limitations under the License. import sys import",
"in self.config.IUPAC_CODES[alt]] except KeyError: return [f's{alt}'] def _match(self, row: pd.core.series.Series) -> (int, int):",
"[description] \"\"\" sc = self.translation_table[self.translation_table.iloc[:,0] == self.haplotypes[i]][\"STRAND\"].unique() sc = np.delete(sc, np.where(sc == [3]))",
"\"\"\" Helps to assemble the constraint for phased data Looks at all strands",
"Define the haplotypes and variants variables haplotypes = [LpVariable(hap, cat = \"Integer\", lowBound=0,",
"ref (str): ref allele Returns: str: reformatted alt allele \"\"\" if alt is",
"f\"s{alt.strip('<>')}\" elif len(ref) > len(alt): return \"id-\" elif len(ref) < len(alt): return f'id{alt[1:]}'",
"x: f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}', axis = 1 ) self.translation_table = self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"] == 99].tolist()) haps =",
"# Haplotypes where there is any variant not matching drops = [] for",
"axis = 1) self.translation_table[\"MATCH\"] = [m[0] for m in matches] self.translation_table[\"STRAND\"] = [m[1]",
"row (pd.core.series.Series): single row from translation table genotypes ([type]): list of genotypes Returns:",
"for a particular position) \"\"\" strand = 0 if row.iloc[8] in [\"insertion\", \"deletion\"]:",
"None: # Happens when a phased call attempt fails self.phased = False called,",
"(list): default return if nothing matches or if all are homozygous Returns: list:",
"row from translation table genotypes ([type]): list of genotypes Returns: int: 99 (missing),",
"sys.exit(1) called, variants = self.lp_hap() if called is None: # Happens when a",
"sample_prefix (str): Sample ID \"\"\" self.phased = gene.phased self.config = config self.solver =",
") self.translation_table = self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"] == 99].tolist()) haps = self.translation_table[\"Haplotype Name\"] no_match = self.translation_table[self.translation_table[\"MATCH\"]",
"str: \"\"\" Modifies record from VCF to standardized form Args: alt (str): alt",
"possible haplotypes def _mod_vcf_record(self, alt: str, ref: str) -> str: \"\"\" Modifies record",
"variants, len(haps), is_ref def _solve(self, hap_prob: object) -> object: if self.solver == \"GLPK\":",
"missed by the other methods if self.variants.iloc[i,3] == \"CNV\": hap_prob += ((lpSum(hap_vars[k][i] *",
"str) -> str: \"\"\" Modifies record from VCF to standardized form Args: alt",
"self.haplotypes[i]][\"STRAND\"].unique() sc = np.delete(sc, np.where(sc == [3])) return(sc if sc.size > 0 else",
"pass class Haplotype: def __init__(self, gene: AbstractGene, sample_prefix: str, config = None) ->",
"haplotype_variants = [] num_vars = self.variants.shape[0] num_haps = len(self.haplotypes) hap_vars = [] for",
"\"MATCH\" hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) <= self.variants.iloc[i,1] *",
"will not be called\") return [], [] else: called, variants, hap_len, is_ref =",
"hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) == self.variants.iloc[i,1]) if self.phased:",
"fairly abstract class that has meta information used by the subject and haplotype",
"numpy as np from pulp import * from .gene import AbstractGene from .",
"- 1 self._solve(hap_prob) if hap_prob.status != 1: break opt = hap_prob.objective.value() new_called, variants,",
"= self.lp_hap() if len(called) > 1: LOGGING.warning(f\"Multiple genotypes possible for {self.sample_prefix}.\") return called,",
"and variants variables haplotypes = [LpVariable(hap, cat = \"Integer\", lowBound=0, upBound=2) for hap",
"is not a subclass, but inherits data from the Gene class Conceptually, Gene",
"possible_haplotypes, haplotype_variants def _get_strand_constraint(self, i: int, default: list) -> list: \"\"\" Helps to",
"99 (missing), 0, 1, or 2 (corresponds to the number of matched alleles",
"is None: # Happens when a phased call attempt fails self.phased = False",
"in trans[\"VAR_ID\"].unique() else 0 for var in self.variants[\"VAR_ID\"]]) hap_prob = LpProblem(\"Haplotype Optimization\", LpMaximize)",
"Happens when a phased call attempt fails self.phased = False called, variants =",
"Create a new haplotype object This object is not a subclass, but inherits",
"= 1 ) self.translation_table = self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"] == 99].tolist()) haps = self.translation_table[\"Haplotype Name\"] no_match",
"in lp_problem.variables(): if v.varValue: if v.varValue > 0: if v.name.split(\"_\")[0] == f'c{self.chromosome}': variants.append(v.name)",
"row.iloc[7]) alt_matches = sum([vcf_geno.count(a) for a in tt_alt_geno]) if alt_matches == 1 and",
"iupac \"\"\" alt = alt.strip(\"<>\") if var_type == \"insertion\": return [f'id{alt}'] elif var_type",
"list: [description] \"\"\" sc = self.translation_table[self.translation_table.iloc[:,0] == self.haplotypes[i]][\"STRAND\"].unique() sc = np.delete(sc, np.where(sc ==",
"g in genotype[\"alleles\"]] except AttributeError: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand if vcf_geno == [\"-\", \"-\"]:",
"\"Integer\", lowBound=0, upBound=2) for hap in self.haplotypes] variants = [LpVariable(var, cat = \"Binary\")",
"OF ANY KIND, either express or implied. # See the License for the",
"based on zygosity for i in range(num_vars): # Iterate over every variant #",
"the other methods if self.variants.iloc[i,3] == \"CNV\": hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for",
"v in lp_problem.variables(): if v.varValue: if v.varValue > 0: if v.name.split(\"_\")[0] == f'c{self.chromosome}':",
"classes. Args: gene (Gene): gene.Gene object sample_prefix (str): Sample ID \"\"\" self.phased =",
"variants like CYP2D6*5 will be missed by the other methods if self.variants.iloc[i,3] ==",
"object: if self.solver == \"GLPK\": hap_prob.solve(GLPK(msg=0)) else: hap_prob.solve(PULP_CBC_CMD(msg=0)) def lp_hap(self) -> tuple: \"\"\"",
"information used by the subject and haplotype classes. Args: gene (Gene): gene.Gene object",
"alt allele ref (str): ref allele Returns: str: reformatted alt allele \"\"\" if",
"= \"Integer\", lowBound=0, upBound=2) for hap in self.haplotypes] variants = [LpVariable(var, cat =",
"\"\"\" possible_haplotypes = [] haplotype_variants = [] num_vars = self.variants.shape[0] num_haps = len(self.haplotypes)",
"\"\"\" is_ref = False haps = [] variants = [] for v in",
"and not is_ref and hap_prob.status >= 0: possible_haplotypes.append(tuple(sorted(called))) haplotype_variants.append(tuple(sorted(variants))) hap_prob += lpSum([h.value() *",
"associated variants \"\"\" possible_haplotypes = [] haplotype_variants = [] num_vars = self.variants.shape[0] num_haps",
"self.chromosome = gene.chromosome self.version = gene.version self.reference = gene.reference def table_matcher(self) -> None:",
"in self.translation_table.iloc[:,0].unique().tolist()] # List of possible haplotypes def _mod_vcf_record(self, alt: str, ref: str)",
"99].tolist()) haps = self.translation_table[\"Haplotype Name\"] no_match = self.translation_table[self.translation_table[\"MATCH\"] == 0].iloc[:,0] # Haplotypes where",
"if a sample is attempted that has zero variants defined. \"\"\" pass class",
"strand = 3 return alt_matches, strand def _haps_from_prob(self, lp_problem: object) -> tuple: \"\"\"",
"List of matched variants self.haplotypes = [hap for hap in self.translation_table.iloc[:,0].unique().tolist()] # List",
"opt >= (max_opt - float(self.config.LP_PARAMS[\"optimal_decay\"])) and not is_ref and hap_prob.status >= 0: possible_haplotypes.append(tuple(sorted(called)))",
"sc.size > 0 else np.array(default)) def optimize_hap(self) -> (): \"\"\" Solve for the",
"len(alt): return f'id{alt[1:]}' # Remove first position else: return f's{alt}' def _mod_tt_record(self, var_type:",
"be missed by the other methods if self.variants.iloc[i,3] == \"CNV\": hap_prob += ((lpSum(hap_vars[k][i]",
"for i in range(num_vars): # Iterate over every variant # A variant allele",
"var in self.variants[\"VAR_ID\"]] # Set constraint of two haplotypes selected hap_prob += (lpSum(haplotypes[i]",
"variants self.haplotypes = [hap for hap in self.translation_table.iloc[:,0].unique().tolist()] # List of possible haplotypes",
"Cannot choose more than x haplotypes (may be increased to find novel sub-alleles)",
"Returns: tuple: called haplotypes and associated information \"\"\" is_ref = False haps =",
"and genotype[\"phased\"]: strand = 3 return alt_matches, strand def _haps_from_prob(self, lp_problem: object) ->",
"variants \"\"\" possible_haplotypes = [] haplotype_variants = [] num_vars = self.variants.shape[0] num_haps =",
"is_ref and hap_prob.status >= 0: possible_haplotypes.append(tuple(sorted(called))) haplotype_variants.append(tuple(sorted(variants))) hap_prob += lpSum([h.value() * h for",
"1 and genotype[\"phased\"]: strand = 1 if max([vcf_geno.index(a) for a in tt_alt_geno]) ==",
"haplotypes selected hap_prob += (lpSum(haplotypes[i] for i in range(num_haps)) <= int(self.config.LP_PARAMS[\"max_haps\"])) # Cannot",
"* self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) >= -1 # Set to maximize",
"from VCF to standardized form Args: alt (str): alt allele ref (str): ref",
"or agreed to in writing, software # distributed under the License is distributed",
"# Iterate over every variant # A variant allele can only be used",
"__init__(self, gene: AbstractGene, sample_prefix: str, config = None) -> None: \"\"\" Create a",
"object) -> object: if self.solver == \"GLPK\": hap_prob.solve(GLPK(msg=0)) else: hap_prob.solve(PULP_CBC_CMD(msg=0)) def lp_hap(self) ->",
"elif len(ref) < len(alt): return f'id{alt[1:]}' # Remove first position else: return f's{alt}'",
"int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand if vcf_geno == [\"-\", \"-\"]: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand tt_alt_geno = self._mod_tt_record(row.iloc[8],",
"[LpVariable(hap, cat = \"Integer\", lowBound=0, upBound=2) for hap in self.haplotypes] variants = [LpVariable(var,",
"row[\"ID\"] try: genotype = self.genotypes[ID] except KeyError: # Not in VCF return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]),",
"Args: i (int): haplotype index default (list): default return if nothing matches or",
"Take a optimally solved lp problem Produce called haplotypes Args: lp_problem (object): solved",
"< len(alt): return f'id{alt[1:]}' # Remove first position else: return f's{alt}' def _mod_tt_record(self,",
"under the Apache License, Version 2.0 (the \"License\"); # you may not use",
"variables haplotypes = [LpVariable(hap, cat = \"Integer\", lowBound=0, upBound=2) for hap in self.haplotypes]",
"[] else: called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if is_ref: possible_haplotypes.append(tuple(called)) haplotype_variants.append(tuple(variants)) return",
"= [m[0] for m in matches] self.translation_table[\"STRAND\"] = [m[1] for m in matches]",
"is_ref: possible_haplotypes.append(tuple(called)) haplotype_variants.append(tuple(variants)) return possible_haplotypes, haplotype_variants max_opt = hap_prob.objective.value() opt = max_opt while",
"\"\"\" if not self.matched: print(\"You need to run the table_matcher function with genotyped",
"elif len(ref) > len(alt): return \"id-\" elif len(ref) < len(alt): return f'id{alt[1:]}' #",
"haps = [] variants = [] for v in lp_problem.variables(): if v.varValue: if",
"\"-\"]: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand tt_alt_geno = self._mod_tt_record(row.iloc[8], row.iloc[7]) alt_matches = sum([vcf_geno.count(a) for a",
"Returns: [list]: modified allele as list based on iupac \"\"\" alt = alt.strip(\"<>\")",
"haplotypes (may be increased to find novel sub-alleles) # Limit alleles that can",
"defined. \"\"\" pass class Haplotype: def __init__(self, gene: AbstractGene, sample_prefix: str, config =",
"\"STRAND\", \"Type\", \"Variant Start\"]].drop_duplicates() # List of matched variants self.haplotypes = [hap for",
"list of possible haplotypes and list of associated variants \"\"\" possible_haplotypes = []",
"self.genotypes = gene.get_sample_vars(sample_prefix) if len(self.genotypes) == 0: raise NoVariantsException self.translation_table = gene.get_translation_table_copy() self.chromosome",
"be used more than \"MATCH\" hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in",
"sample_prefix self.genotypes = gene.get_sample_vars(sample_prefix) if len(self.genotypes) == 0: raise NoVariantsException self.translation_table = gene.get_translation_table_copy()",
"i: int, default: list) -> list: \"\"\" Helps to assemble the constraint for",
"# Drop haplotypes that don't match 100% self.variants = self.translation_table.loc[:,[\"VAR_ID\", \"MATCH\", \"STRAND\", \"Type\",",
"of possible haplotypes and list of associated variants \"\"\" possible_haplotypes = [] haplotype_variants",
"be used once per haplotype, up to two alleles per variant hap_prob +=",
"[f'id-'] else: try: return [f's{a}' for a in self.config.IUPAC_CODES[alt]] except KeyError: return [f's{alt}']",
"i[1]) for i in haps]).flatten().tolist() if len(called) == 1: called.append(self.reference) return called, variants,",
"position else: return f's{alt}' def _mod_tt_record(self, var_type: str, alt: str) -> list: \"\"\"",
"= 3 return alt_matches, strand def _haps_from_prob(self, lp_problem: object) -> tuple: \"\"\" Take",
"License, Version 2.0 (the \"License\"); # you may not use this file except",
"== \"insertion\": return [f'id{alt}'] elif var_type == \"deletion\": return [f'id-'] else: try: return",
"match in a single translation table row with a sample Args: row (pd.core.series.Series):",
"single translation table row with a sample Args: row (pd.core.series.Series): single row from",
"the translation table ref to a standardized form Args: var_type (str): insertion, deletion,",
"are homozygous Returns: list: [description] \"\"\" sc = self.translation_table[self.translation_table.iloc[:,0] == self.haplotypes[i]][\"STRAND\"].unique() sc =",
"translation table with the subject's variants \"\"\" self.matched = True matches = self.translation_table.apply(self._match,",
"choose more than x haplotypes (may be increased to find novel sub-alleles) #",
"alt (str): alt allele ref (str): ref allele Returns: str: reformatted alt allele",
"all are homozygous Returns: list: [description] \"\"\" sc = self.translation_table[self.translation_table.iloc[:,0] == self.haplotypes[i]][\"STRAND\"].unique() sc",
"haplotype classes. Args: gene (Gene): gene.Gene object sample_prefix (str): Sample ID \"\"\" self.phased",
"for hap in self.haplotypes] variants = [LpVariable(var, cat = \"Binary\") for var in",
"k in range(num_haps)))) # A given variant cannot be used more than \"MATCH\"",
"(self.translation_table.iloc[:,0] == self.haplotypes[i]) & (self.translation_table[\"MATCH\"] > 0) ].shape[0] * haplotypes[i] for i in",
"a haplotype, MUST be used # Otherwise, variants like CYP2D6*5 will be missed",
"a sample is attempted that has zero variants defined. \"\"\" pass class Haplotype:",
"= gene.chromosome self.version = gene.version self.reference = gene.reference def table_matcher(self) -> None: \"\"\"",
"from . import LOGGING class NoVariantsException(Exception): \"\"\" Exception to call if a sample",
"Returns: tuple: list of possible haplotypes and list of associated variants \"\"\" possible_haplotypes",
"import AbstractGene from . import LOGGING class NoVariantsException(Exception): \"\"\" Exception to call if",
"def optimize_hap(self) -> (): \"\"\" Solve for the most likely diplotype Returns: ():",
"variant allele can only be used once per haplotype, up to two alleles",
"haplotypes def _mod_vcf_record(self, alt: str, ref: str) -> str: \"\"\" Modifies record from",
"None, None else: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will not be called\") return",
"for k in range(num_haps))) <= self.variants.iloc[i,1] * variants[i]) # Any CNV variants defined,",
"vcf_geno = [self._mod_vcf_record(g, genotype[\"ref\"]) for g in genotype[\"alleles\"]] except AttributeError: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand",
"genotype = self.genotypes[ID] except KeyError: # Not in VCF return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand try:",
"haplotypes[k] for k in range(num_haps)))) # A given variant cannot be used more",
"information \"\"\" is_ref = False haps = [] variants = [] for v",
"f's{alt}' def _mod_tt_record(self, var_type: str, alt: str) -> list: \"\"\" Modifies the translation",
"int, default: list) -> list: \"\"\" Helps to assemble the constraint for phased",
"len(haps), is_ref def _solve(self, hap_prob: object) -> object: if self.solver == \"GLPK\": hap_prob.solve(GLPK(msg=0))",
"allele ref (str): ref allele Returns: str: reformatted alt allele \"\"\" if alt",
"are part of a haplotype Removes homozygous calls Args: i (int): haplotype index",
"found, {self.sample_prefix} will be re-attempted with phasing off.\") return None, None else: LOGGING.warning(f\"No",
"License. import sys import itertools import pandas as pd import numpy as np",
"np.delete(sc, np.where(sc == [3])) return(sc if sc.size > 0 else np.array(default)) def optimize_hap(self)",
"can be chosen based on zygosity for i in range(num_vars): # Iterate over",
"sum([i == k for k in no_match]) > 0: drops.append(i) self.translation_table = self.translation_table[~self.translation_table.iloc[:,0].isin(drops)]",
"else: return f's{alt}' def _mod_tt_record(self, var_type: str, alt: str) -> list: \"\"\" Modifies",
"or implied. # See the License for the specific language governing permissions and",
"the table_matcher function with genotyped before you can optimize\") sys.exit(1) called, variants =",
"def __init__(self, gene: AbstractGene, sample_prefix: str, config = None) -> None: \"\"\" Create",
"(): \"\"\" Solve for the most likely diplotype Returns: (): Results \"\"\" if",
"except KeyError: return [f's{alt}'] def _match(self, row: pd.core.series.Series) -> (int, int): \"\"\" Evaluate",
"and haplotype classes. Args: gene (Gene): gene.Gene object sample_prefix (str): Sample ID \"\"\"",
"for a in tt_alt_geno]) == 1 else -1 elif alt_matches == 2 and",
"{self.sample_prefix} will be re-attempted with phasing off.\") return None, None else: LOGGING.warning(f\"No feasible",
"strand try: vcf_geno = [self._mod_vcf_record(g, genotype[\"ref\"]) for g in genotype[\"alleles\"]] except AttributeError: return",
"range(num_haps)) <= 1 hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps))",
"self.phased = gene.phased self.config = config self.solver = gene.solver self.matched = False self.sample_prefix",
"(object): solved lp problem Returns: tuple: called haplotypes and associated information \"\"\" is_ref",
"for hap in self.translation_table.iloc[:,0].unique().tolist()] # List of possible haplotypes def _mod_vcf_record(self, alt: str,",
"class NoVariantsException(Exception): \"\"\" Exception to call if a sample is attempted that has",
"called haplotypes and associated information \"\"\" is_ref = False haps = [] variants",
"= int(row[\"ID\"].split(\"_\")[1]) - 1 ID = f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID' else: ID = row[\"ID\"] try: genotype",
"var in self.variants[\"VAR_ID\"]]) hap_prob = LpProblem(\"Haplotype Optimization\", LpMaximize) # Define the haplotypes and",
"A variant allele can only be used once per haplotype, up to two",
"Args: row (pd.core.series.Series): single row from translation table genotypes ([type]): list of genotypes",
"((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) == self.variants.iloc[i,1]) if self.phased: for i",
"= gene.solver self.matched = False self.sample_prefix = sample_prefix self.genotypes = gene.get_sample_vars(sample_prefix) if len(self.genotypes)",
"alt is None: return \"-\" if \"<\" in alt: return f\"s{alt.strip('<>')}\" elif len(ref)",
"with a haplotype, MUST be used # Otherwise, variants like CYP2D6*5 will be",
"called haplotypes Args: lp_problem (object): solved lp problem Returns: tuple: called haplotypes and",
"self._mod_tt_record(row.iloc[8], row.iloc[7]) alt_matches = sum([vcf_geno.count(a) for a in tt_alt_geno]) if alt_matches == 1",
"no_match = self.translation_table[self.translation_table[\"MATCH\"] == 0].iloc[:,0] # Haplotypes where there is any variant not",
"= gene.version self.reference = gene.reference def table_matcher(self) -> None: \"\"\" Matches variants in",
"particular position) \"\"\" strand = 0 if row.iloc[8] in [\"insertion\", \"deletion\"]: new_pos =",
"use this file except in compliance with the License. # You may obtain",
"return [f'id{alt}'] elif var_type == \"deletion\": return [f'id-'] else: try: return [f's{a}' for",
"possible haplotypes and list of associated variants \"\"\" possible_haplotypes = [] haplotype_variants =",
"except AttributeError: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand if vcf_geno == [\"-\", \"-\"]: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand",
"-> None: \"\"\" Matches variants in the translation table with the subject's variants",
"if len(self.genotypes) == 0: raise NoVariantsException self.translation_table = gene.get_translation_table_copy() self.chromosome = gene.chromosome self.version",
"2021 <NAME> # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"max_opt = hap_prob.objective.value() opt = max_opt while opt >= (max_opt - float(self.config.LP_PARAMS[\"optimal_decay\"])) and",
"self.variants.shape[0] num_haps = len(self.haplotypes) hap_vars = [] for hap in self.haplotypes: trans =",
"x haplotypes (may be increased to find novel sub-alleles) # Limit alleles that",
"== 0: raise NoVariantsException self.translation_table = gene.get_translation_table_copy() self.chromosome = gene.chromosome self.version = gene.version",
"subject and haplotype classes. Args: gene (Gene): gene.Gene object sample_prefix (str): Sample ID",
"or if all are homozygous Returns: list: [description] \"\"\" sc = self.translation_table[self.translation_table.iloc[:,0] ==",
"Remove first position else: return f's{alt}' def _mod_tt_record(self, var_type: str, alt: str) ->",
"0 for var in self.variants[\"VAR_ID\"]]) hap_prob = LpProblem(\"Haplotype Optimization\", LpMaximize) # Define the",
"chosen based on zygosity for i in range(num_vars): # Iterate over every variant",
"self.variants[\"VAR_ID\"]] # Set constraint of two haplotypes selected hap_prob += (lpSum(haplotypes[i] for i",
"hap_prob = LpProblem(\"Haplotype Optimization\", LpMaximize) # Define the haplotypes and variants variables haplotypes",
"* haplotypes[k] for k in range(num_haps)))) # A given variant cannot be used",
"object is not a subclass, but inherits data from the Gene class Conceptually,",
"self.solver = gene.solver self.matched = False self.sample_prefix = sample_prefix self.genotypes = gene.get_sample_vars(sample_prefix) if",
"= gene.get_translation_table_copy() self.chromosome = gene.chromosome self.version = gene.version self.reference = gene.reference def table_matcher(self)",
"variants defined, if matched with a haplotype, MUST be used # Otherwise, variants",
"upBound=2) for hap in self.haplotypes] variants = [LpVariable(var, cat = \"Binary\") for var",
"alt allele \"\"\" if alt is None: return \"-\" if \"<\" in alt:",
"solution found, {self.sample_prefix} will not be called\") return [], [] else: called, variants,",
"\"\"\" self.matched = True matches = self.translation_table.apply(self._match, axis = 1) self.translation_table[\"MATCH\"] = [m[0]",
"sum([vcf_geno.count(a) for a in tt_alt_geno]) if alt_matches == 1 and genotype[\"phased\"]: strand =",
"of a haplotype Removes homozygous calls Args: i (int): haplotype index default (list):",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) >= -1 # Set to maximize the",
"for hap in self.haplotypes: trans = self.translation_table[self.translation_table.iloc[:,0] == hap] hap_vars.append([1 if var in",
"np.where(sc == [3])) return(sc if sc.size > 0 else np.array(default)) def optimize_hap(self) ->",
"called, variants = self.lp_hap() if len(called) > 1: LOGGING.warning(f\"Multiple genotypes possible for {self.sample_prefix}.\")",
"list of genotypes Returns: int: 99 (missing), 0, 1, or 2 (corresponds to",
"all strands that are part of a haplotype Removes homozygous calls Args: i",
"= gene.phased self.config = config self.solver = gene.solver self.matched = False self.sample_prefix =",
"# distributed under the License is distributed on an \"AS IS\" BASIS, #",
"# Otherwise, variants like CYP2D6*5 will be missed by the other methods if",
"self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"] == 99].tolist()) haps = self.translation_table[\"Haplotype Name\"] no_match = self.translation_table[self.translation_table[\"MATCH\"] == 0].iloc[:,0] #",
"Solve for the most likely diplotype Returns: (): Results \"\"\" if not self.matched:",
"is_ref = self._haps_from_prob(hap_prob) if is_ref: possible_haplotypes.append(tuple(called)) haplotype_variants.append(tuple(variants)) return possible_haplotypes, haplotype_variants max_opt = hap_prob.objective.value()",
"Gene class Conceptually, Gene is a fairly abstract class that has meta information",
"(str): Sample ID \"\"\" self.phased = gene.phased self.config = config self.solver = gene.solver",
"called is None: # Happens when a phased call attempt fails self.phased =",
"Produce called haplotypes Args: lp_problem (object): solved lp problem Returns: tuple: called haplotypes",
"* haplotypes[k] for k in range(num_haps))) <= self.variants.iloc[i,1] * variants[i]) # Any CNV",
"([type]): list of genotypes Returns: int: 99 (missing), 0, 1, or 2 (corresponds",
"v.varValue > 0: if v.name.split(\"_\")[0] == f'c{self.chromosome}': variants.append(v.name) else: haps.append((v.name, v.varValue)) if len(haps)",
"with the License. # You may obtain a copy of the License at",
"haps = self.translation_table[\"Haplotype Name\"] no_match = self.translation_table[self.translation_table[\"MATCH\"] == 0].iloc[:,0] # Haplotypes where there",
"solution found, {self.sample_prefix} will be re-attempted with phasing off.\") return None, None else:",
"return alt_matches, strand def _haps_from_prob(self, lp_problem: object) -> tuple: \"\"\" Take a optimally",
"# Any CNV variants defined, if matched with a haplotype, MUST be used",
"or 2 (corresponds to the number of matched alleles for a particular position)",
"[] variants = [] for v in lp_problem.variables(): if v.varValue: if v.varValue >",
"by the other methods if self.variants.iloc[i,3] == \"CNV\": hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k]",
"law or agreed to in writing, software # distributed under the License is",
"haplotype, up to two alleles per variant hap_prob += (variants[i] <= (lpSum(hap_vars[k][i] *",
"cat = \"Binary\") for var in self.variants[\"VAR_ID\"]] # Set constraint of two haplotypes",
"= False haps = [] variants = [] for v in lp_problem.variables(): if",
"hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) >= -1 #",
"False self.sample_prefix = sample_prefix self.genotypes = gene.get_sample_vars(sample_prefix) if len(self.genotypes) == 0: raise NoVariantsException",
"0: possible_haplotypes.append(tuple(sorted(called))) haplotype_variants.append(tuple(sorted(variants))) hap_prob += lpSum([h.value() * h for h in haplotypes]) <=",
"_get_strand_constraint(self, i: int, default: list) -> list: \"\"\" Helps to assemble the constraint",
"list: \"\"\" Modifies the translation table ref to a standardized form Args: var_type",
"subject's variants \"\"\" self.matched = True matches = self.translation_table.apply(self._match, axis = 1) self.translation_table[\"MATCH\"]",
"ID \"\"\" self.phased = gene.phased self.config = config self.solver = gene.solver self.matched =",
"is attempted that has zero variants defined. \"\"\" pass class Haplotype: def __init__(self,",
"range(num_haps)) <= int(self.config.LP_PARAMS[\"max_haps\"])) # Cannot choose more than x haplotypes (may be increased",
"but inherits data from the Gene class Conceptually, Gene is a fairly abstract",
"in compliance with the License. # You may obtain a copy of the",
"Conceptually, Gene is a fairly abstract class that has meta information used by",
"used more than \"MATCH\" hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps)))",
"import sys import itertools import pandas as pd import numpy as np from",
"k in range(num_haps))) == self.variants.iloc[i,1]) if self.phased: for i in range(num_haps): hap_prob +=",
"if v.varValue > 0: if v.name.split(\"_\")[0] == f'c{self.chromosome}': variants.append(v.name) else: haps.append((v.name, v.varValue)) if",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"in range(num_haps))) == self.variants.iloc[i,1]) if self.phased: for i in range(num_haps): hap_prob += lpSum(haplotypes[i]",
"class Haplotype: def __init__(self, gene: AbstractGene, sample_prefix: str, config = None) -> None:",
"import itertools import pandas as pd import numpy as np from pulp import",
"KeyError: # Not in VCF return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand try: vcf_geno = [self._mod_vcf_record(g, genotype[\"ref\"])",
"= new_called return possible_haplotypes, haplotype_variants def _get_strand_constraint(self, i: int, default: list) -> list:",
"var_type == \"deletion\": return [f'id-'] else: try: return [f's{a}' for a in self.config.IUPAC_CODES[alt]]",
"return [], [] else: called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if is_ref: possible_haplotypes.append(tuple(called))",
"in no_match]) > 0: drops.append(i) self.translation_table = self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] # Drop haplotypes that don't",
"not self.matched: print(\"You need to run the table_matcher function with genotyped before you",
"(int): haplotype index default (list): default return if nothing matches or if all",
"meta information used by the subject and haplotype classes. Args: gene (Gene): gene.Gene",
"range(num_haps)))) # A given variant cannot be used more than \"MATCH\" hap_prob +=",
"2: called = [haps[0][0], haps[1][0]] else: called = np.array([np.repeat(i[0], i[1]) for i in",
"two alleles per variant hap_prob += (variants[i] <= (lpSum(hap_vars[k][i] * haplotypes[k] for k",
"self._solve(hap_prob) if hap_prob.status != 1: if self.phased: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will",
"== 0].iloc[:,0] # Haplotypes where there is any variant not matching drops =",
"matches or if all are homozygous Returns: list: [description] \"\"\" sc = self.translation_table[self.translation_table.iloc[:,0]",
"a in self.config.IUPAC_CODES[alt]] except KeyError: return [f's{alt}'] def _match(self, row: pd.core.series.Series) -> (int,",
"0: break called = new_called return possible_haplotypes, haplotype_variants def _get_strand_constraint(self, i: int, default:",
"\"Variant Start\"]].drop_duplicates() # List of matched variants self.haplotypes = [hap for hap in",
"default return if nothing matches or if all are homozygous Returns: list: [description]",
"haplotypes]) <= hap_len - 1 self._solve(hap_prob) if hap_prob.status != 1: break opt =",
"methods if self.variants.iloc[i,3] == \"CNV\": hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in",
"hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, []).size) <= 1 # max one strand hap_prob",
"for var in self.variants[\"VAR_ID\"]]) hap_prob = LpProblem(\"Haplotype Optimization\", LpMaximize) # Define the haplotypes",
"may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required",
"pandas as pd import numpy as np from pulp import * from .gene",
"the number of matched alleles for a particular position) \"\"\" strand = 0",
"if new_called == called or len(new_called) == 0: break called = new_called return",
"= gene.get_sample_vars(sample_prefix) if len(self.genotypes) == 0: raise NoVariantsException self.translation_table = gene.get_translation_table_copy() self.chromosome =",
"Args: alt (str): alt allele ref (str): ref allele Returns: str: reformatted alt",
"alt.strip(\"<>\") if var_type == \"insertion\": return [f'id{alt}'] elif var_type == \"deletion\": return [f'id-']",
"[self.reference, self.reference] is_ref = True elif len(haps) == 2: called = [haps[0][0], haps[1][0]]",
"(Gene): gene.Gene object sample_prefix (str): Sample ID \"\"\" self.phased = gene.phased self.config =",
"= self.translation_table.apply(self._match, axis = 1) self.translation_table[\"MATCH\"] = [m[0] for m in matches] self.translation_table[\"STRAND\"]",
"for g in genotype[\"alleles\"]] except AttributeError: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand if vcf_geno == [\"-\",",
"hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) <= self.variants.iloc[i,1] * variants[i])",
"Returns: str: reformatted alt allele \"\"\" if alt is None: return \"-\" if",
"_haps_from_prob(self, lp_problem: object) -> tuple: \"\"\" Take a optimally solved lp problem Produce",
"drops.append(i) self.translation_table = self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] # Drop haplotypes that don't match 100% self.variants =",
"allele as list based on iupac \"\"\" alt = alt.strip(\"<>\") if var_type ==",
"[f's{alt}'] def _match(self, row: pd.core.series.Series) -> (int, int): \"\"\" Evaluate match in a",
"== self.variants.iloc[i,1]) if self.phased: for i in range(num_haps): hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i,",
"== \"CNV\": hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) == self.variants.iloc[i,1])",
"(): Results \"\"\" if not self.matched: print(\"You need to run the table_matcher function",
"\"\"\" Exception to call if a sample is attempted that has zero variants",
"optimize\") sys.exit(1) called, variants = self.lp_hap() if called is None: # Happens when",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"self.phased: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will be re-attempted with phasing off.\") return",
"ID = row[\"ID\"] try: genotype = self.genotypes[ID] except KeyError: # Not in VCF",
"vcf_geno == [\"-\", \"-\"]: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand tt_alt_geno = self._mod_tt_record(row.iloc[8], row.iloc[7]) alt_matches =",
"Returns: int: 99 (missing), 0, 1, or 2 (corresponds to the number of",
"1: called.append(self.reference) return called, variants, len(haps), is_ref def _solve(self, hap_prob: object) -> object:",
"novel sub-alleles) # Limit alleles that can be chosen based on zygosity for",
"import LOGGING class NoVariantsException(Exception): \"\"\" Exception to call if a sample is attempted",
"language governing permissions and # limitations under the License. import sys import itertools",
"lowBound=0, upBound=2) for hap in self.haplotypes] variants = [LpVariable(var, cat = \"Binary\") for",
"= LpProblem(\"Haplotype Optimization\", LpMaximize) # Define the haplotypes and variants variables haplotypes =",
"diplotype Returns: (): Results \"\"\" if not self.matched: print(\"You need to run the",
"this file except in compliance with the License. # You may obtain a",
"number of variant alleles used hap_prob += lpSum( self.translation_table[ (self.translation_table.iloc[:,0] == self.haplotypes[i]) &",
"new_called == called or len(new_called) == 0: break called = new_called return possible_haplotypes,",
"for k in range(num_haps)))) # A given variant cannot be used more than",
"# http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing,",
"self.translation_table.loc[:,[\"VAR_ID\", \"MATCH\", \"STRAND\", \"Type\", \"Variant Start\"]].drop_duplicates() # List of matched variants self.haplotypes =",
"attempt fails self.phased = False called, variants = self.lp_hap() if len(called) > 1:",
"import * from .gene import AbstractGene from . import LOGGING class NoVariantsException(Exception): \"\"\"",
"NoVariantsException(Exception): \"\"\" Exception to call if a sample is attempted that has zero",
"table ref to a standardized form Args: var_type (str): insertion, deletion, or substitution",
"[3])) return(sc if sc.size > 0 else np.array(default)) def optimize_hap(self) -> (): \"\"\"",
"self.translation_table.apply(self._match, axis = 1) self.translation_table[\"MATCH\"] = [m[0] for m in matches] self.translation_table[\"STRAND\"] =",
"[0])[0] for i in range(num_haps)) >= -1 # Set to maximize the number",
"f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID' else: ID = row[\"ID\"] try: genotype = self.genotypes[ID] except KeyError: # Not",
"at all strands that are part of a haplotype Removes homozygous calls Args:",
"variants in the translation table with the subject's variants \"\"\" self.matched = True",
"haplotype_variants max_opt = hap_prob.objective.value() opt = max_opt while opt >= (max_opt - float(self.config.LP_PARAMS[\"optimal_decay\"]))",
"i in range(num_vars): # Iterate over every variant # A variant allele can",
"hap_prob += lpSum([h.value() * h for h in haplotypes]) <= hap_len - 1",
"a new haplotype object This object is not a subclass, but inherits data",
"<= 1 hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) >=",
"table genotypes ([type]): list of genotypes Returns: int: 99 (missing), 0, 1, or",
"and hap_prob.status >= 0: possible_haplotypes.append(tuple(sorted(called))) haplotype_variants.append(tuple(sorted(variants))) hap_prob += lpSum([h.value() * h for h",
"m in matches] self.translation_table[\"VAR_ID\"] = self.translation_table.apply( lambda x: f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}', axis = 1 )",
"\"\"\" Modifies the translation table ref to a standardized form Args: var_type (str):",
"in VCF return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand try: vcf_geno = [self._mod_vcf_record(g, genotype[\"ref\"]) for g in",
"cat = \"Integer\", lowBound=0, upBound=2) for hap in self.haplotypes] variants = [LpVariable(var, cat",
">= -1 # Set to maximize the number of variant alleles used hap_prob",
"= gene.reference def table_matcher(self) -> None: \"\"\" Matches variants in the translation table",
"LP problem Returns: tuple: list of possible haplotypes and list of associated variants",
"tt_alt_geno = self._mod_tt_record(row.iloc[8], row.iloc[7]) alt_matches = sum([vcf_geno.count(a) for a in tt_alt_geno]) if alt_matches",
"off.\") return None, None else: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will not be",
"3 return alt_matches, strand def _haps_from_prob(self, lp_problem: object) -> tuple: \"\"\" Take a",
"NoVariantsException self.translation_table = gene.get_translation_table_copy() self.chromosome = gene.chromosome self.version = gene.version self.reference = gene.reference",
"else: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will not be called\") return [], []",
"m in matches] self.translation_table[\"STRAND\"] = [m[1] for m in matches] self.translation_table[\"VAR_ID\"] = self.translation_table.apply(",
"haplotype object This object is not a subclass, but inherits data from the",
"\"deletion\"]: new_pos = int(row[\"ID\"].split(\"_\")[1]) - 1 ID = f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID' else: ID = row[\"ID\"]",
"possible_haplotypes = [] haplotype_variants = [] num_vars = self.variants.shape[0] num_haps = len(self.haplotypes) hap_vars",
"row: pd.core.series.Series) -> (int, int): \"\"\" Evaluate match in a single translation table",
"else 0 for var in self.variants[\"VAR_ID\"]]) hap_prob = LpProblem(\"Haplotype Optimization\", LpMaximize) # Define",
"strand tt_alt_geno = self._mod_tt_record(row.iloc[8], row.iloc[7]) alt_matches = sum([vcf_geno.count(a) for a in tt_alt_geno]) if",
"Returns: (): Results \"\"\" if not self.matched: print(\"You need to run the table_matcher",
"num_vars = self.variants.shape[0] num_haps = len(self.haplotypes) hap_vars = [] for hap in self.haplotypes:",
"= alt.strip(\"<>\") if var_type == \"insertion\": return [f'id{alt}'] elif var_type == \"deletion\": return",
"strand = 1 if max([vcf_geno.index(a) for a in tt_alt_geno]) == 1 else -1",
"else: called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if is_ref: possible_haplotypes.append(tuple(called)) haplotype_variants.append(tuple(variants)) return possible_haplotypes,",
"> 0: drops.append(i) self.translation_table = self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] # Drop haplotypes that don't match 100%",
"is_ref = self._haps_from_prob(hap_prob) if new_called == called or len(new_called) == 0: break called",
"!= 1: break opt = hap_prob.objective.value() new_called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if",
"[LpVariable(var, cat = \"Binary\") for var in self.variants[\"VAR_ID\"]] # Set constraint of two",
"<reponame>sadams2013/hiMoon # Copyright 2021 <NAME> # Licensed under the Apache License, Version 2.0",
"= 0 if row.iloc[8] in [\"insertion\", \"deletion\"]: new_pos = int(row[\"ID\"].split(\"_\")[1]) - 1 ID",
"variants defined. \"\"\" pass class Haplotype: def __init__(self, gene: AbstractGene, sample_prefix: str, config",
"if vcf_geno == [\"-\", \"-\"]: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand tt_alt_geno = self._mod_tt_record(row.iloc[8], row.iloc[7]) alt_matches",
"Results \"\"\" if not self.matched: print(\"You need to run the table_matcher function with",
"haplotype_variants def _get_strand_constraint(self, i: int, default: list) -> list: \"\"\" Helps to assemble",
"required by applicable law or agreed to in writing, software # distributed under",
"if v.name.split(\"_\")[0] == f'c{self.chromosome}': variants.append(v.name) else: haps.append((v.name, v.varValue)) if len(haps) == 0: called",
"[m[1] for m in matches] self.translation_table[\"VAR_ID\"] = self.translation_table.apply( lambda x: f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}', axis =",
"gene.version self.reference = gene.reference def table_matcher(self) -> None: \"\"\" Matches variants in the",
"break opt = hap_prob.objective.value() new_called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if new_called ==",
"self.lp_hap() if len(called) > 1: LOGGING.warning(f\"Multiple genotypes possible for {self.sample_prefix}.\") return called, variants",
"a fairly abstract class that has meta information used by the subject and",
"of two haplotypes selected hap_prob += (lpSum(haplotypes[i] for i in range(num_haps)) <= int(self.config.LP_PARAMS[\"max_haps\"]))",
"== \"GLPK\": hap_prob.solve(GLPK(msg=0)) else: hap_prob.solve(PULP_CBC_CMD(msg=0)) def lp_hap(self) -> tuple: \"\"\" Build and run",
"range(num_haps))) == self.variants.iloc[i,1]) if self.phased: for i in range(num_haps): hap_prob += lpSum(haplotypes[i] *",
"phased data Looks at all strands that are part of a haplotype Removes",
"up to two alleles per variant hap_prob += (variants[i] <= (lpSum(hap_vars[k][i] * haplotypes[k]",
"with genotyped before you can optimize\") sys.exit(1) called, variants = self.lp_hap() if called",
"* haplotypes[i] for i in range(num_haps)) self._solve(hap_prob) if hap_prob.status != 1: if self.phased:",
"translation table Returns: [list]: modified allele as list based on iupac \"\"\" alt",
"[self._mod_vcf_record(g, genotype[\"ref\"]) for g in genotype[\"alleles\"]] except AttributeError: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand if vcf_geno",
"-1 # Set to maximize the number of variant alleles used hap_prob +=",
"Exception to call if a sample is attempted that has zero variants defined.",
"1: if self.phased: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will be re-attempted with phasing",
"== 99].tolist()) haps = self.translation_table[\"Haplotype Name\"] no_match = self.translation_table[self.translation_table[\"MATCH\"] == 0].iloc[:,0] # Haplotypes",
"gene.phased self.config = config self.solver = gene.solver self.matched = False self.sample_prefix = sample_prefix",
"homozygous calls Args: i (int): haplotype index default (list): default return if nothing",
"print(\"You need to run the table_matcher function with genotyped before you can optimize\")",
"translation table row with a sample Args: row (pd.core.series.Series): single row from translation",
"= self.translation_table.apply( lambda x: f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}', axis = 1 ) self.translation_table = self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"] ==",
"== 2: called = [haps[0][0], haps[1][0]] else: called = np.array([np.repeat(i[0], i[1]) for i",
"& (self.translation_table[\"MATCH\"] > 0) ].shape[0] * haplotypes[i] for i in range(num_haps)) self._solve(hap_prob) if",
"\"\"\" pass class Haplotype: def __init__(self, gene: AbstractGene, sample_prefix: str, config = None)",
"lp problem Returns: tuple: called haplotypes and associated information \"\"\" is_ref = False",
"of possible haplotypes def _mod_vcf_record(self, alt: str, ref: str) -> str: \"\"\" Modifies",
"Not in VCF return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand try: vcf_geno = [self._mod_vcf_record(g, genotype[\"ref\"]) for g",
"number of matched alleles for a particular position) \"\"\" strand = 0 if",
"haplotypes[i] for i in range(num_haps)) self._solve(hap_prob) if hap_prob.status != 1: if self.phased: LOGGING.warning(f\"No",
"List of possible haplotypes def _mod_vcf_record(self, alt: str, ref: str) -> str: \"\"\"",
"need to run the table_matcher function with genotyped before you can optimize\") sys.exit(1)",
"hap_prob.status >= 0: possible_haplotypes.append(tuple(sorted(called))) haplotype_variants.append(tuple(sorted(variants))) hap_prob += lpSum([h.value() * h for h in",
".gene import AbstractGene from . import LOGGING class NoVariantsException(Exception): \"\"\" Exception to call",
"haplotypes that don't match 100% self.variants = self.translation_table.loc[:,[\"VAR_ID\", \"MATCH\", \"STRAND\", \"Type\", \"Variant Start\"]].drop_duplicates()",
"pd.core.series.Series) -> (int, int): \"\"\" Evaluate match in a single translation table row",
"alt = alt.strip(\"<>\") if var_type == \"insertion\": return [f'id{alt}'] elif var_type == \"deletion\":",
"var in trans[\"VAR_ID\"].unique() else 0 for var in self.variants[\"VAR_ID\"]]) hap_prob = LpProblem(\"Haplotype Optimization\",",
"-> None: \"\"\" Create a new haplotype object This object is not a",
"== [\"-\", \"-\"]: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand tt_alt_geno = self._mod_tt_record(row.iloc[8], row.iloc[7]) alt_matches = sum([vcf_geno.count(a)",
"lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) <= 1 hap_prob += lpSum(haplotypes[i]",
"run the LP problem Returns: tuple: list of possible haplotypes and list of",
"self.translation_table = self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"] == 99].tolist()) haps = self.translation_table[\"Haplotype Name\"] no_match = self.translation_table[self.translation_table[\"MATCH\"] ==",
"i in range(num_haps)) <= 1 hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i",
"alt: str, ref: str) -> str: \"\"\" Modifies record from VCF to standardized",
"int: 99 (missing), 0, 1, or 2 (corresponds to the number of matched",
"np.array([np.repeat(i[0], i[1]) for i in haps]).flatten().tolist() if len(called) == 1: called.append(self.reference) return called,",
"max([vcf_geno.index(a) for a in tt_alt_geno]) == 1 else -1 elif alt_matches == 2",
"that has zero variants defined. \"\"\" pass class Haplotype: def __init__(self, gene: AbstractGene,",
"\"MATCH\", \"STRAND\", \"Type\", \"Variant Start\"]].drop_duplicates() # List of matched variants self.haplotypes = [hap",
"1) self.translation_table[\"MATCH\"] = [m[0] for m in matches] self.translation_table[\"STRAND\"] = [m[1] for m",
"in genotype[\"alleles\"]] except AttributeError: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand if vcf_geno == [\"-\", \"-\"]: return",
"100% self.variants = self.translation_table.loc[:,[\"VAR_ID\", \"MATCH\", \"STRAND\", \"Type\", \"Variant Start\"]].drop_duplicates() # List of matched",
"for a in self.config.IUPAC_CODES[alt]] except KeyError: return [f's{alt}'] def _match(self, row: pd.core.series.Series) ->",
"matches = self.translation_table.apply(self._match, axis = 1) self.translation_table[\"MATCH\"] = [m[0] for m in matches]",
"more than x haplotypes (may be increased to find novel sub-alleles) # Limit",
"hap] hap_vars.append([1 if var in trans[\"VAR_ID\"].unique() else 0 for var in self.variants[\"VAR_ID\"]]) hap_prob",
"= f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID' else: ID = row[\"ID\"] try: genotype = self.genotypes[ID] except KeyError: #",
"trans = self.translation_table[self.translation_table.iloc[:,0] == hap] hap_vars.append([1 if var in trans[\"VAR_ID\"].unique() else 0 for",
"strands that are part of a haplotype Removes homozygous calls Args: i (int):",
"== f'c{self.chromosome}': variants.append(v.name) else: haps.append((v.name, v.varValue)) if len(haps) == 0: called = [self.reference,",
"position) \"\"\" strand = 0 if row.iloc[8] in [\"insertion\", \"deletion\"]: new_pos = int(row[\"ID\"].split(\"_\")[1])",
"list of associated variants \"\"\" possible_haplotypes = [] haplotype_variants = [] num_vars =",
"in haps]).flatten().tolist() if len(called) == 1: called.append(self.reference) return called, variants, len(haps), is_ref def",
"the number of variant alleles used hap_prob += lpSum( self.translation_table[ (self.translation_table.iloc[:,0] == self.haplotypes[i])",
"# you may not use this file except in compliance with the License.",
"not matching drops = [] for i in no_match.unique(): if sum([i == k",
"calls Args: i (int): haplotype index default (list): default return if nothing matches",
"-> (): \"\"\" Solve for the most likely diplotype Returns: (): Results \"\"\"",
"+= (variants[i] <= (lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps)))) # A given",
"> 0 else np.array(default)) def optimize_hap(self) -> (): \"\"\" Solve for the most",
"table_matcher(self) -> None: \"\"\" Matches variants in the translation table with the subject's",
"data Looks at all strands that are part of a haplotype Removes homozygous",
"= self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] # Drop haplotypes that don't match 100% self.variants = self.translation_table.loc[:,[\"VAR_ID\", \"MATCH\",",
"Haplotype: def __init__(self, gene: AbstractGene, sample_prefix: str, config = None) -> None: \"\"\"",
"len(self.genotypes) == 0: raise NoVariantsException self.translation_table = gene.get_translation_table_copy() self.chromosome = gene.chromosome self.version =",
"[f'id{alt}'] elif var_type == \"deletion\": return [f'id-'] else: try: return [f's{a}' for a",
"range(num_haps))) <= self.variants.iloc[i,1] * variants[i]) # Any CNV variants defined, if matched with",
"called.append(self.reference) return called, variants, len(haps), is_ref def _solve(self, hap_prob: object) -> object: if",
"used once per haplotype, up to two alleles per variant hap_prob += (variants[i]",
"= True matches = self.translation_table.apply(self._match, axis = 1) self.translation_table[\"MATCH\"] = [m[0] for m",
"self.translation_table[\"VAR_ID\"] = self.translation_table.apply( lambda x: f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}', axis = 1 ) self.translation_table = self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"]",
"int(row[\"ID\"].split(\"_\")[1]) - 1 ID = f'{row[\"ID\"].split(\"_\")[0]}_{new_pos}_SID' else: ID = row[\"ID\"] try: genotype =",
"# Remove first position else: return f's{alt}' def _mod_tt_record(self, var_type: str, alt: str)",
"is any variant not matching drops = [] for i in no_match.unique(): if",
"(int, int): \"\"\" Evaluate match in a single translation table row with a",
"drops = [] for i in no_match.unique(): if sum([i == k for k",
"if alt is None: return \"-\" if \"<\" in alt: return f\"s{alt.strip('<>')}\" elif",
"self.haplotypes = [hap for hap in self.translation_table.iloc[:,0].unique().tolist()] # List of possible haplotypes def",
"2 (corresponds to the number of matched alleles for a particular position) \"\"\"",
"int): \"\"\" Evaluate match in a single translation table row with a sample",
"a single translation table row with a sample Args: row (pd.core.series.Series): single row",
"to the number of matched alleles for a particular position) \"\"\" strand =",
"> len(alt): return \"id-\" elif len(ref) < len(alt): return f'id{alt[1:]}' # Remove first",
"given variant cannot be used more than \"MATCH\" hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k]",
"variants.append(v.name) else: haps.append((v.name, v.varValue)) if len(haps) == 0: called = [self.reference, self.reference] is_ref",
"+= lpSum(haplotypes[i] * self._get_strand_constraint(i, []).size) <= 1 # max one strand hap_prob +=",
"inherits data from the Gene class Conceptually, Gene is a fairly abstract class",
"(pd.core.series.Series): single row from translation table genotypes ([type]): list of genotypes Returns: int:",
"License for the specific language governing permissions and # limitations under the License.",
"tt_alt_geno]) == 1 else -1 elif alt_matches == 2 and genotype[\"phased\"]: strand =",
"and run the LP problem Returns: tuple: list of possible haplotypes and list",
"strand hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) <= 1",
"find novel sub-alleles) # Limit alleles that can be chosen based on zygosity",
"genotyped before you can optimize\") sys.exit(1) called, variants = self.lp_hap() if called is",
"+= lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) <= 1 hap_prob +=",
"no_match]) > 0: drops.append(i) self.translation_table = self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] # Drop haplotypes that don't match",
"\"License\"); # you may not use this file except in compliance with the",
"return if nothing matches or if all are homozygous Returns: list: [description] \"\"\"",
"elif var_type == \"deletion\": return [f'id-'] else: try: return [f's{a}' for a in",
"haplotypes and variants variables haplotypes = [LpVariable(hap, cat = \"Integer\", lowBound=0, upBound=2) for",
"genotypes ([type]): list of genotypes Returns: int: 99 (missing), 0, 1, or 2",
"# Happens when a phased call attempt fails self.phased = False called, variants",
"self.variants.iloc[i,3] == \"CNV\": hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) ==",
"in range(num_haps)) <= int(self.config.LP_PARAMS[\"max_haps\"])) # Cannot choose more than x haplotypes (may be",
"a sample Args: row (pd.core.series.Series): single row from translation table genotypes ([type]): list",
"per haplotype, up to two alleles per variant hap_prob += (variants[i] <= (lpSum(hap_vars[k][i]",
"in self.variants[\"VAR_ID\"]] # Set constraint of two haplotypes selected hap_prob += (lpSum(haplotypes[i] for",
"for the most likely diplotype Returns: (): Results \"\"\" if not self.matched: print(\"You",
"hap_prob.status != 1: if self.phased: LOGGING.warning(f\"No feasible solution found, {self.sample_prefix} will be re-attempted",
"-> list: \"\"\" Helps to assemble the constraint for phased data Looks at",
"the haplotypes and variants variables haplotypes = [LpVariable(hap, cat = \"Integer\", lowBound=0, upBound=2)",
"for i in haps]).flatten().tolist() if len(called) == 1: called.append(self.reference) return called, variants, len(haps),",
"in writing, software # distributed under the License is distributed on an \"AS",
"that are part of a haplotype Removes homozygous calls Args: i (int): haplotype",
"<= hap_len - 1 self._solve(hap_prob) if hap_prob.status != 1: break opt = hap_prob.objective.value()",
"i in haps]).flatten().tolist() if len(called) == 1: called.append(self.reference) return called, variants, len(haps), is_ref",
"A given variant cannot be used more than \"MATCH\" hap_prob += ((lpSum(hap_vars[k][i] *",
"hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) <= 1 hap_prob",
"\"-\" if \"<\" in alt: return f\"s{alt.strip('<>')}\" elif len(ref) > len(alt): return \"id-\"",
"[], [] else: called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if is_ref: possible_haplotypes.append(tuple(called)) haplotype_variants.append(tuple(variants))",
"opt = max_opt while opt >= (max_opt - float(self.config.LP_PARAMS[\"optimal_decay\"])) and not is_ref and",
"like CYP2D6*5 will be missed by the other methods if self.variants.iloc[i,3] == \"CNV\":",
"* haplotypes[k] for k in range(num_haps))) == self.variants.iloc[i,1]) if self.phased: for i in",
"pulp import * from .gene import AbstractGene from . import LOGGING class NoVariantsException(Exception):",
"substitution alt (str): allele from translation table Returns: [list]: modified allele as list",
"self._get_strand_constraint(i, []).size) <= 1 # max one strand hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i,",
"for h in haplotypes]) <= hap_len - 1 self._solve(hap_prob) if hap_prob.status != 1:",
"= config self.solver = gene.solver self.matched = False self.sample_prefix = sample_prefix self.genotypes =",
"return \"-\" if \"<\" in alt: return f\"s{alt.strip('<>')}\" elif len(ref) > len(alt): return",
"if len(called) == 1: called.append(self.reference) return called, variants, len(haps), is_ref def _solve(self, hap_prob:",
"likely diplotype Returns: (): Results \"\"\" if not self.matched: print(\"You need to run",
"itertools import pandas as pd import numpy as np from pulp import *",
"alt: return f\"s{alt.strip('<>')}\" elif len(ref) > len(alt): return \"id-\" elif len(ref) < len(alt):",
"insertion, deletion, or substitution alt (str): allele from translation table Returns: [list]: modified",
"lpSum(haplotypes[i] * self._get_strand_constraint(i, []).size) <= 1 # max one strand hap_prob += lpSum(haplotypes[i]",
"more than \"MATCH\" hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) <=",
"max_opt while opt >= (max_opt - float(self.config.LP_PARAMS[\"optimal_decay\"])) and not is_ref and hap_prob.status >=",
"(self.translation_table[\"MATCH\"] > 0) ].shape[0] * haplotypes[i] for i in range(num_haps)) self._solve(hap_prob) if hap_prob.status",
"to call if a sample is attempted that has zero variants defined. \"\"\"",
"else: hap_prob.solve(PULP_CBC_CMD(msg=0)) def lp_hap(self) -> tuple: \"\"\" Build and run the LP problem",
"else: called = np.array([np.repeat(i[0], i[1]) for i in haps]).flatten().tolist() if len(called) == 1:",
"-> (int, int): \"\"\" Evaluate match in a single translation table row with",
"subclass, but inherits data from the Gene class Conceptually, Gene is a fairly",
"var_type == \"insertion\": return [f'id{alt}'] elif var_type == \"deletion\": return [f'id-'] else: try:",
"that has meta information used by the subject and haplotype classes. Args: gene",
"2.0 (the \"License\"); # you may not use this file except in compliance",
"i in range(num_haps): hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, []).size) <= 1 # max",
"in self.haplotypes] variants = [LpVariable(var, cat = \"Binary\") for var in self.variants[\"VAR_ID\"]] #",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the",
"= sum([vcf_geno.count(a) for a in tt_alt_geno]) if alt_matches == 1 and genotype[\"phased\"]: strand",
"new_called return possible_haplotypes, haplotype_variants def _get_strand_constraint(self, i: int, default: list) -> list: \"\"\"",
"= [] variants = [] for v in lp_problem.variables(): if v.varValue: if v.varValue",
"str, config = None) -> None: \"\"\" Create a new haplotype object This",
"the LP problem Returns: tuple: list of possible haplotypes and list of associated",
"haplotype_variants.append(tuple(variants)) return possible_haplotypes, haplotype_variants max_opt = hap_prob.objective.value() opt = max_opt while opt >=",
"express or implied. # See the License for the specific language governing permissions",
"1 hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) >= -1",
"modified allele as list based on iupac \"\"\" alt = alt.strip(\"<>\") if var_type",
"_match(self, row: pd.core.series.Series) -> (int, int): \"\"\" Evaluate match in a single translation",
"((lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps))) <= self.variants.iloc[i,1] * variants[i]) # Any",
"= [self.reference, self.reference] is_ref = True elif len(haps) == 2: called = [haps[0][0],",
"row with a sample Args: row (pd.core.series.Series): single row from translation table genotypes",
"for k in no_match]) > 0: drops.append(i) self.translation_table = self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] # Drop haplotypes",
"either express or implied. # See the License for the specific language governing",
"be called\") return [], [] else: called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if",
"variants = [] for v in lp_problem.variables(): if v.varValue: if v.varValue > 0:",
"gene.Gene object sample_prefix (str): Sample ID \"\"\" self.phased = gene.phased self.config = config",
"every variant # A variant allele can only be used once per haplotype,",
"f'c{self.chromosome}': variants.append(v.name) else: haps.append((v.name, v.varValue)) if len(haps) == 0: called = [self.reference, self.reference]",
"there is any variant not matching drops = [] for i in no_match.unique():",
"variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if is_ref: possible_haplotypes.append(tuple(called)) haplotype_variants.append(tuple(variants)) return possible_haplotypes, haplotype_variants max_opt",
"# Cannot choose more than x haplotypes (may be increased to find novel",
"in range(num_haps)) >= -1 # Set to maximize the number of variant alleles",
"the License. # You may obtain a copy of the License at #",
"\"Type\", \"Variant Start\"]].drop_duplicates() # List of matched variants self.haplotypes = [hap for hap",
"== [3])) return(sc if sc.size > 0 else np.array(default)) def optimize_hap(self) -> ():",
"self.translation_table.apply( lambda x: f'{x[\"ID\"]}_{str(x.iloc[6]).strip(\"<>\")}_{str(x.iloc[7]).strip(\"<>\")}', axis = 1 ) self.translation_table = self.translation_table.drop(self.translation_table.index[self.translation_table[\"MATCH\"] == 99].tolist())",
"# Not in VCF return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand try: vcf_geno = [self._mod_vcf_record(g, genotype[\"ref\"]) for",
"is_ref = True elif len(haps) == 2: called = [haps[0][0], haps[1][0]] else: called",
"num_haps = len(self.haplotypes) hap_vars = [] for hap in self.haplotypes: trans = self.translation_table[self.translation_table.iloc[:,0]",
"for a in tt_alt_geno]) if alt_matches == 1 and genotype[\"phased\"]: strand = 1",
"(may be increased to find novel sub-alleles) # Limit alleles that can be",
"by the subject and haplotype classes. Args: gene (Gene): gene.Gene object sample_prefix (str):",
"other methods if self.variants.iloc[i,3] == \"CNV\": hap_prob += ((lpSum(hap_vars[k][i] * haplotypes[k] for k",
"You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless",
"if sum([i == k for k in no_match]) > 0: drops.append(i) self.translation_table =",
"= self._haps_from_prob(hap_prob) if new_called == called or len(new_called) == 0: break called =",
"-1 elif alt_matches == 2 and genotype[\"phased\"]: strand = 3 return alt_matches, strand",
"constraint of two haplotypes selected hap_prob += (lpSum(haplotypes[i] for i in range(num_haps)) <=",
"solved lp problem Returns: tuple: called haplotypes and associated information \"\"\" is_ref =",
"standardized form Args: alt (str): alt allele ref (str): ref allele Returns: str:",
"table row with a sample Args: row (pd.core.series.Series): single row from translation table",
"Any CNV variants defined, if matched with a haplotype, MUST be used #",
"Helps to assemble the constraint for phased data Looks at all strands that",
"Drop haplotypes that don't match 100% self.variants = self.translation_table.loc[:,[\"VAR_ID\", \"MATCH\", \"STRAND\", \"Type\", \"Variant",
"and list of associated variants \"\"\" possible_haplotypes = [] haplotype_variants = [] num_vars",
"function with genotyped before you can optimize\") sys.exit(1) called, variants = self.lp_hap() if",
"default: list) -> list: \"\"\" Helps to assemble the constraint for phased data",
"np.array(default)) def optimize_hap(self) -> (): \"\"\" Solve for the most likely diplotype Returns:",
"Sample ID \"\"\" self.phased = gene.phased self.config = config self.solver = gene.solver self.matched",
"solved lp problem Produce called haplotypes Args: lp_problem (object): solved lp problem Returns:",
"has zero variants defined. \"\"\" pass class Haplotype: def __init__(self, gene: AbstractGene, sample_prefix:",
"used # Otherwise, variants like CYP2D6*5 will be missed by the other methods",
"-> str: \"\"\" Modifies record from VCF to standardized form Args: alt (str):",
"# A given variant cannot be used more than \"MATCH\" hap_prob += ((lpSum(hap_vars[k][i]",
"lp_problem: object) -> tuple: \"\"\" Take a optimally solved lp problem Produce called",
"== self.haplotypes[i]][\"STRAND\"].unique() sc = np.delete(sc, np.where(sc == [3])) return(sc if sc.size > 0",
"def _mod_tt_record(self, var_type: str, alt: str) -> list: \"\"\" Modifies the translation table",
"= [LpVariable(hap, cat = \"Integer\", lowBound=0, upBound=2) for hap in self.haplotypes] variants =",
"list) -> list: \"\"\" Helps to assemble the constraint for phased data Looks",
"one strand hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) <=",
"in haplotypes]) <= hap_len - 1 self._solve(hap_prob) if hap_prob.status != 1: break opt",
"= np.array([np.repeat(i[0], i[1]) for i in haps]).flatten().tolist() if len(called) == 1: called.append(self.reference) return",
"haplotypes[k] for k in range(num_haps))) <= self.variants.iloc[i,1] * variants[i]) # Any CNV variants",
"possible_haplotypes, haplotype_variants max_opt = hap_prob.objective.value() opt = max_opt while opt >= (max_opt -",
"haps.append((v.name, v.varValue)) if len(haps) == 0: called = [self.reference, self.reference] is_ref = True",
"hap_len - 1 self._solve(hap_prob) if hap_prob.status != 1: break opt = hap_prob.objective.value() new_called,",
"sample is attempted that has zero variants defined. \"\"\" pass class Haplotype: def",
"http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software",
"<= (lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps)))) # A given variant cannot",
"i in range(num_haps)) >= -1 # Set to maximize the number of variant",
"= [self._mod_vcf_record(g, genotype[\"ref\"]) for g in genotype[\"alleles\"]] except AttributeError: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand if",
"hap_prob += (variants[i] <= (lpSum(hap_vars[k][i] * haplotypes[k] for k in range(num_haps)))) # A",
"+= lpSum(haplotypes[i] * self._get_strand_constraint(i, [0])[0] for i in range(num_haps)) >= -1 # Set",
"translation table genotypes ([type]): list of genotypes Returns: int: 99 (missing), 0, 1,",
"= [hap for hap in self.translation_table.iloc[:,0].unique().tolist()] # List of possible haplotypes def _mod_vcf_record(self,",
"attempted that has zero variants defined. \"\"\" pass class Haplotype: def __init__(self, gene:",
"return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand if vcf_geno == [\"-\", \"-\"]: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand tt_alt_geno =",
"matching drops = [] for i in no_match.unique(): if sum([i == k for",
"of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or",
"ref allele Returns: str: reformatted alt allele \"\"\" if alt is None: return",
"deletion, or substitution alt (str): allele from translation table Returns: [list]: modified allele",
"\"<\" in alt: return f\"s{alt.strip('<>')}\" elif len(ref) > len(alt): return \"id-\" elif len(ref)",
"= \"Binary\") for var in self.variants[\"VAR_ID\"]] # Set constraint of two haplotypes selected",
"<NAME> # Licensed under the Apache License, Version 2.0 (the \"License\"); # you",
"break called = new_called return possible_haplotypes, haplotype_variants def _get_strand_constraint(self, i: int, default: list)",
"be increased to find novel sub-alleles) # Limit alleles that can be chosen",
"= False self.sample_prefix = sample_prefix self.genotypes = gene.get_sample_vars(sample_prefix) if len(self.genotypes) == 0: raise",
"- float(self.config.LP_PARAMS[\"optimal_decay\"])) and not is_ref and hap_prob.status >= 0: possible_haplotypes.append(tuple(sorted(called))) haplotype_variants.append(tuple(sorted(variants))) hap_prob +=",
"be re-attempted with phasing off.\") return None, None else: LOGGING.warning(f\"No feasible solution found,",
"# Define the haplotypes and variants variables haplotypes = [LpVariable(hap, cat = \"Integer\",",
"return called, variants, len(haps), is_ref def _solve(self, hap_prob: object) -> object: if self.solver",
"in range(num_haps): hap_prob += lpSum(haplotypes[i] * self._get_strand_constraint(i, []).size) <= 1 # max one",
"problem Returns: tuple: list of possible haplotypes and list of associated variants \"\"\"",
"MUST be used # Otherwise, variants like CYP2D6*5 will be missed by the",
"except in compliance with the License. # You may obtain a copy of",
"new_called, variants, hap_len, is_ref = self._haps_from_prob(hap_prob) if new_called == called or len(new_called) ==",
"\"\"\" sc = self.translation_table[self.translation_table.iloc[:,0] == self.haplotypes[i]][\"STRAND\"].unique() sc = np.delete(sc, np.where(sc == [3])) return(sc",
"genotype[\"ref\"]) for g in genotype[\"alleles\"]] except AttributeError: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand if vcf_geno ==",
"return f's{alt}' def _mod_tt_record(self, var_type: str, alt: str) -> list: \"\"\" Modifies the",
"= self.genotypes[ID] except KeyError: # Not in VCF return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand try: vcf_geno",
"self.translation_table = self.translation_table[~self.translation_table.iloc[:,0].isin(drops)] # Drop haplotypes that don't match 100% self.variants = self.translation_table.loc[:,[\"VAR_ID\",",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"than x haplotypes (may be increased to find novel sub-alleles) # Limit alleles",
"of matched alleles for a particular position) \"\"\" strand = 0 if row.iloc[8]",
"\"insertion\": return [f'id{alt}'] elif var_type == \"deletion\": return [f'id-'] else: try: return [f's{a}'",
"[] num_vars = self.variants.shape[0] num_haps = len(self.haplotypes) hap_vars = [] for hap in",
"True elif len(haps) == 2: called = [haps[0][0], haps[1][0]] else: called = np.array([np.repeat(i[0],",
"variants = [LpVariable(var, cat = \"Binary\") for var in self.variants[\"VAR_ID\"]] # Set constraint",
"a in tt_alt_geno]) if alt_matches == 1 and genotype[\"phased\"]: strand = 1 if",
"return f\"s{alt.strip('<>')}\" elif len(ref) > len(alt): return \"id-\" elif len(ref) < len(alt): return",
"== called or len(new_called) == 0: break called = new_called return possible_haplotypes, haplotype_variants",
"Haplotypes where there is any variant not matching drops = [] for i",
"genotype[\"alleles\"]] except AttributeError: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]), strand if vcf_geno == [\"-\", \"-\"]: return int(self.config.MISSING_DATA_PARAMETERS[\"missing_variants\"]),",
"0: if v.name.split(\"_\")[0] == f'c{self.chromosome}': variants.append(v.name) else: haps.append((v.name, v.varValue)) if len(haps) == 0:",
"with the subject's variants \"\"\" self.matched = True matches = self.translation_table.apply(self._match, axis =",
"alt_matches == 1 and genotype[\"phased\"]: strand = 1 if max([vcf_geno.index(a) for a in",
"call if a sample is attempted that has zero variants defined. \"\"\" pass",
"= self.variants.shape[0] num_haps = len(self.haplotypes) hap_vars = [] for hap in self.haplotypes: trans",
"matched with a haplotype, MUST be used # Otherwise, variants like CYP2D6*5 will",
"ref to a standardized form Args: var_type (str): insertion, deletion, or substitution alt",
"v.varValue: if v.varValue > 0: if v.name.split(\"_\")[0] == f'c{self.chromosome}': variants.append(v.name) else: haps.append((v.name, v.varValue))",
"None: \"\"\" Create a new haplotype object This object is not a subclass,",
"self.config.IUPAC_CODES[alt]] except KeyError: return [f's{alt}'] def _match(self, row: pd.core.series.Series) -> (int, int): \"\"\"",
"# A variant allele can only be used once per haplotype, up to",
"haplotype, MUST be used # Otherwise, variants like CYP2D6*5 will be missed by",
"and genotype[\"phased\"]: strand = 1 if max([vcf_geno.index(a) for a in tt_alt_geno]) == 1",
"Gene is a fairly abstract class that has meta information used by the",
"called or len(new_called) == 0: break called = new_called return possible_haplotypes, haplotype_variants def",
"def _mod_vcf_record(self, alt: str, ref: str) -> str: \"\"\" Modifies record from VCF",
"range(num_vars): # Iterate over every variant # A variant allele can only be",
"a in tt_alt_geno]) == 1 else -1 elif alt_matches == 2 and genotype[\"phased\"]:",
"list: \"\"\" Helps to assemble the constraint for phased data Looks at all"
] |
[
"nearest_interpolator from pioneer.das.api.samples import Sample, XYZVI from pioneer.das.api.sensors.sensor import Sensor class RadarConti(Sensor): def",
"def __init__(self, name, platform): super(RadarConti, self).__init__(name, platform, {'xyzvi': (XYZVI, nearest_interpolator)}) self.amplitude_type = 'velocity'",
"from pioneer.das.api.samples import Sample, XYZVI from pioneer.das.api.sensors.sensor import Sensor class RadarConti(Sensor): def __init__(self,",
"from pioneer.das.api.interpolators import nearest_interpolator from pioneer.das.api.samples import Sample, XYZVI from pioneer.das.api.sensors.sensor import Sensor",
"Sensor class RadarConti(Sensor): def __init__(self, name, platform): super(RadarConti, self).__init__(name, platform, {'xyzvi': (XYZVI, nearest_interpolator)})",
"nearest_interpolator)}) self.amplitude_type = 'velocity' # types = ['i', 'velocity'] def get_corrected_cloud(self, _timestamp, pts,",
"RadarConti(Sensor): def __init__(self, name, platform): super(RadarConti, self).__init__(name, platform, {'xyzvi': (XYZVI, nearest_interpolator)}) self.amplitude_type =",
"self).__init__(name, platform, {'xyzvi': (XYZVI, nearest_interpolator)}) self.amplitude_type = 'velocity' # types = ['i', 'velocity']",
"import nearest_interpolator from pioneer.das.api.samples import Sample, XYZVI from pioneer.das.api.sensors.sensor import Sensor class RadarConti(Sensor):",
"import Sensor class RadarConti(Sensor): def __init__(self, name, platform): super(RadarConti, self).__init__(name, platform, {'xyzvi': (XYZVI,",
"platform): super(RadarConti, self).__init__(name, platform, {'xyzvi': (XYZVI, nearest_interpolator)}) self.amplitude_type = 'velocity' # types =",
"from pioneer.das.api.sensors.sensor import Sensor class RadarConti(Sensor): def __init__(self, name, platform): super(RadarConti, self).__init__(name, platform,",
"self.amplitude_type = 'velocity' # types = ['i', 'velocity'] def get_corrected_cloud(self, _timestamp, pts, _dtype):",
"Sample, XYZVI from pioneer.das.api.sensors.sensor import Sensor class RadarConti(Sensor): def __init__(self, name, platform): super(RadarConti,",
"super(RadarConti, self).__init__(name, platform, {'xyzvi': (XYZVI, nearest_interpolator)}) self.amplitude_type = 'velocity' # types = ['i',",
"'velocity' # types = ['i', 'velocity'] def get_corrected_cloud(self, _timestamp, pts, _dtype): return pts",
"(XYZVI, nearest_interpolator)}) self.amplitude_type = 'velocity' # types = ['i', 'velocity'] def get_corrected_cloud(self, _timestamp,",
"<reponame>leddartech/pioneer.das.api from pioneer.das.api.interpolators import nearest_interpolator from pioneer.das.api.samples import Sample, XYZVI from pioneer.das.api.sensors.sensor import",
"name, platform): super(RadarConti, self).__init__(name, platform, {'xyzvi': (XYZVI, nearest_interpolator)}) self.amplitude_type = 'velocity' # types",
"= 'velocity' # types = ['i', 'velocity'] def get_corrected_cloud(self, _timestamp, pts, _dtype): return",
"__init__(self, name, platform): super(RadarConti, self).__init__(name, platform, {'xyzvi': (XYZVI, nearest_interpolator)}) self.amplitude_type = 'velocity' #",
"import Sample, XYZVI from pioneer.das.api.sensors.sensor import Sensor class RadarConti(Sensor): def __init__(self, name, platform):",
"{'xyzvi': (XYZVI, nearest_interpolator)}) self.amplitude_type = 'velocity' # types = ['i', 'velocity'] def get_corrected_cloud(self,",
"pioneer.das.api.sensors.sensor import Sensor class RadarConti(Sensor): def __init__(self, name, platform): super(RadarConti, self).__init__(name, platform, {'xyzvi':",
"XYZVI from pioneer.das.api.sensors.sensor import Sensor class RadarConti(Sensor): def __init__(self, name, platform): super(RadarConti, self).__init__(name,",
"pioneer.das.api.interpolators import nearest_interpolator from pioneer.das.api.samples import Sample, XYZVI from pioneer.das.api.sensors.sensor import Sensor class",
"class RadarConti(Sensor): def __init__(self, name, platform): super(RadarConti, self).__init__(name, platform, {'xyzvi': (XYZVI, nearest_interpolator)}) self.amplitude_type",
"platform, {'xyzvi': (XYZVI, nearest_interpolator)}) self.amplitude_type = 'velocity' # types = ['i', 'velocity'] def",
"pioneer.das.api.samples import Sample, XYZVI from pioneer.das.api.sensors.sensor import Sensor class RadarConti(Sensor): def __init__(self, name,"
] |
[
"t, startX=n.offset - NOTE_DURATION - t, endX=n.offset - t, startY=-1 + en /",
"122673, 122752, 122831, 123068, 123147, 123226, 123304, 123383, 123539, 123618, 123696, 123773, 123851,",
"124084, 124162, 124239, 124316, 124471, 124547, 124624, 124701, 124778, 124932, 125008, 125084, 125160,",
"119661, 119827, 121953, 122114, 122275, 122434, 122594, 122673, 122752, 122831, 123068, 123147, 123226,",
"reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent from reamber.osu.OsuMap import OsuMap # notes: 01:37:742 (97742|2,125moves993|2) -",
"123773, 123851, 124007, 124084, 124162, 124239, 124316, 124471, 124547, 124624, 124701, 124778, 124932,",
"else 1) * ( np.piecewise(x, [(i <= x) & (x < i +",
"def f247(m: OsuMap): notes = sorted([n for n in m.notes.hits() if 97742 <",
"pi import numpy as np from aleph.consts import * from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD,",
"* INC_SHAKE_AMP) * np.sin((x - i) * pi / (SHAKE_WINDOW - es *",
"pi / (SHAKE_WINDOW - es * 3)) for es, i in enumerate(SHAKES)], lambda",
"SHAKE_WINDOW) for i in SHAKES], [*[lambda x, i=i, es=es: (BASE_SHAKE_AMP + es *",
"OsuMap # notes: 01:37:742 (97742|2,125moves993|2) - SHAKES = np.array( [100560, 100790, 101018, 101245,",
"board if it's < 2 (-1 if n.column < 2 else 1) *",
"t, endX=n.offset - t, startY=-1 + en / 500 , endY=1 - en",
"122752, 122831, 123068, 123147, 123226, 123304, 123383, 123539, 123618, 123696, 123773, 123851, 124007,",
"= 250 NOTE_DURATION = 2000 # noinspection PyTypeChecker events = [ *[SvOsuMeasureLineEvent( firstOffset=n.offset",
"i in SHAKES], [*[lambda x, i=i, es=es: (BASE_SHAKE_AMP + es * INC_SHAKE_AMP) *",
"125993]) def f247(m: OsuMap): notes = sorted([n for n in m.notes.hits() if 97742",
"111252, 111348, 113698, 113882, 114065, 114248, 116577, 116753, 116928, 117103, 119326, 119494, 119661,",
"- es * 3)) for es, i in enumerate(SHAKES)], lambda x: 0]) +",
"SHAKES], [*[lambda x, i=i, es=es: (BASE_SHAKE_AMP + es * INC_SHAKE_AMP) * np.sin((x -",
"+ es * INC_SHAKE_AMP) * np.sin((x - i) * pi / (SHAKE_WINDOW -",
"enumerate(notes) for t in np.linspace(0, 24, NOTE_THICKNESS)] ] svs, bpms = svOsuMeasureLineMD(events, scalingFactor=SCALE,",
"as np from aleph.consts import * from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent from reamber.osu.OsuMap",
"from math import pi import numpy as np from aleph.consts import * from",
"i + SHAKE_WINDOW) for i in SHAKES], [*[lambda x, i=i, es=es: (BASE_SHAKE_AMP +",
"startX=n.offset - NOTE_DURATION - t, endX=n.offset - t, startY=-1 + en / 500",
"- t, lastOffset=n.offset - t, startX=n.offset - NOTE_DURATION - t, endX=n.offset - t,",
"(x - (n.offset - t)) / NOTE_DURATION ) ]) for en, n in",
"from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent from reamber.osu.OsuMap import OsuMap # notes: 01:37:742 (97742|2,125moves993|2)",
"124932, 125008, 125084, 125160, 125236, 125388, 125464, 125540, 125616, 125692, 125767, 125842, 125918,",
"NOTE_DURATION - t, endX=n.offset - t, startY=-1 + en / 500 , endY=1",
"- NOTE_DURATION - t, lastOffset=n.offset - t, startX=n.offset - NOTE_DURATION - t, endX=n.offset",
"* ( np.piecewise(x, [(i <= x) & (x < i + SHAKE_WINDOW) for",
"/ NOTE_DURATION ) ]) for en, n in enumerate(notes) for t in np.linspace(0,",
"124316, 124471, 124547, 124624, 124701, 124778, 124932, 125008, 125084, 125160, 125236, 125388, 125464,",
"np.sin((x - i) * pi / (SHAKE_WINDOW - es * 3)) for es,",
"125540, 125616, 125692, 125767, 125842, 125918, 125993]) def f247(m: OsuMap): notes = sorted([n",
"es=es: (BASE_SHAKE_AMP + es * INC_SHAKE_AMP) * np.sin((x - i) * pi /",
"124778, 124932, 125008, 125084, 125160, 125236, 125388, 125464, 125540, 125616, 125692, 125767, 125842,",
"124701, 124778, 124932, 125008, 125084, 125160, 125236, 125388, 125464, 125540, 125616, 125692, 125767,",
"125842, 125918, 125993]) def f247(m: OsuMap): notes = sorted([n for n in m.notes.hits()",
"events = [ *[SvOsuMeasureLineEvent( firstOffset=n.offset - NOTE_DURATION - t, lastOffset=n.offset - t, startX=n.offset",
"(SHAKE_WINDOW - es * 3)) for es, i in enumerate(SHAKES)], lambda x: 0])",
"- t)) / NOTE_DURATION ) ]) for en, n in enumerate(notes) for t",
"125993]) BASE_SHAKE_AMP = 0.010 INC_SHAKE_AMP = 0.0010 SHAKE_WINDOW = 250 NOTE_DURATION = 2000",
"PyTypeChecker events = [ *[SvOsuMeasureLineEvent( firstOffset=n.offset - NOTE_DURATION - t, lastOffset=n.offset - t,",
"in enumerate(notes) for t in np.linspace(0, 24, NOTE_THICKNESS)] ] svs, bpms = svOsuMeasureLineMD(events,",
"0.0010 SHAKE_WINDOW = 250 NOTE_DURATION = 2000 # noinspection PyTypeChecker events = [",
"n=n, t=t: # This flips the board if it's < 2 (-1 if",
"in m.notes.hits() if 97742 < n.offset <= 125993]) BASE_SHAKE_AMP = 0.010 INC_SHAKE_AMP =",
"# noinspection PyTypeChecker events = [ *[SvOsuMeasureLineEvent( firstOffset=n.offset - NOTE_DURATION - t, lastOffset=n.offset",
"<= 125993]) BASE_SHAKE_AMP = 0.010 INC_SHAKE_AMP = 0.0010 SHAKE_WINDOW = 250 NOTE_DURATION =",
"116928, 117103, 119326, 119494, 119661, 119827, 121953, 122114, 122275, 122434, 122594, 122673, 122752,",
"24, NOTE_THICKNESS)] ] svs, bpms = svOsuMeasureLineMD(events, scalingFactor=SCALE, firstOffset=97742, lastOffset=125993, paddingSize=PADDING, endBpm=250) m.svs.extend(svs)",
"SHAKES = np.array( [100560, 100790, 101018, 101245, 104124, 104340, 104556, 104770, 107487, 107692,",
"121953, 122114, 122275, 122434, 122594, 122673, 122752, 122831, 123068, 123147, 123226, 123304, 123383,",
"122434, 122594, 122673, 122752, 122831, 123068, 123147, 123226, 123304, 123383, 123539, 123618, 123696,",
"123539, 123618, 123696, 123773, 123851, 124007, 124084, 124162, 124239, 124316, 124471, 124547, 124624,",
"BASE_SHAKE_AMP = 0.010 INC_SHAKE_AMP = 0.0010 SHAKE_WINDOW = 250 NOTE_DURATION = 2000 #",
"= 0.010 INC_SHAKE_AMP = 0.0010 SHAKE_WINDOW = 250 NOTE_DURATION = 2000 # noinspection",
"< i + SHAKE_WINDOW) for i in SHAKES], [*[lambda x, i=i, es=es: (BASE_SHAKE_AMP",
"t, lastOffset=n.offset - t, startX=n.offset - NOTE_DURATION - t, endX=n.offset - t, startY=-1",
"104556, 104770, 107487, 107692, 107896, 108099, 110674, 110867, 111059, 111156, 111252, 111348, 113698,",
"/ (SHAKE_WINDOW - es * 3)) for es, i in enumerate(SHAKES)], lambda x:",
"INC_SHAKE_AMP) * np.sin((x - i) * pi / (SHAKE_WINDOW - es * 3))",
"122594, 122673, 122752, 122831, 123068, 123147, 123226, 123304, 123383, 123539, 123618, 123696, 123773,",
"- t, endX=n.offset - t, startY=-1 + en / 500 , endY=1 -",
"113698, 113882, 114065, 114248, 116577, 116753, 116928, 117103, 119326, 119494, 119661, 119827, 121953,",
"104124, 104340, 104556, 104770, 107487, 107692, 107896, 108099, 110674, 110867, 111059, 111156, 111252,",
"i=i, es=es: (BASE_SHAKE_AMP + es * INC_SHAKE_AMP) * np.sin((x - i) * pi",
"es * INC_SHAKE_AMP) * np.sin((x - i) * pi / (SHAKE_WINDOW - es",
"]) for en, n in enumerate(notes) for t in np.linspace(0, 24, NOTE_THICKNESS)] ]",
", endY=1 - en / 500, funcs=[ lambda x, n=n, t=t: # This",
"notes = sorted([n for n in m.notes.hits() if 97742 < n.offset <= 125993])",
"110867, 111059, 111156, 111252, 111348, 113698, 113882, 114065, 114248, 116577, 116753, 116928, 117103,",
"111156, 111252, 111348, 113698, 113882, 114065, 114248, 116577, 116753, 116928, 117103, 119326, 119494,",
"math import pi import numpy as np from aleph.consts import * from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD",
"firstOffset=n.offset - NOTE_DURATION - t, lastOffset=n.offset - t, startX=n.offset - NOTE_DURATION - t,",
"NOTE_DURATION - t, lastOffset=n.offset - t, startX=n.offset - NOTE_DURATION - t, endX=n.offset -",
"numpy as np from aleph.consts import * from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent from",
"125236, 125388, 125464, 125540, 125616, 125692, 125767, 125842, 125918, 125993]) def f247(m: OsuMap):",
"125918, 125993]) def f247(m: OsuMap): notes = sorted([n for n in m.notes.hits() if",
"[100560, 100790, 101018, 101245, 104124, 104340, 104556, 104770, 107487, 107692, 107896, 108099, 110674,",
"funcs=[ lambda x, n=n, t=t: # This flips the board if it's <",
"124547, 124624, 124701, 124778, 124932, 125008, 125084, 125160, 125236, 125388, 125464, 125540, 125616,",
"104770, 107487, 107692, 107896, 108099, 110674, 110867, 111059, 111156, 111252, 111348, 113698, 113882,",
"lastOffset=n.offset - t, startX=n.offset - NOTE_DURATION - t, endX=n.offset - t, startY=-1 +",
"i) * pi / (SHAKE_WINDOW - es * 3)) for es, i in",
"es * 3)) for es, i in enumerate(SHAKES)], lambda x: 0]) + (x",
"125692, 125767, 125842, 125918, 125993]) def f247(m: OsuMap): notes = sorted([n for n",
"for en, n in enumerate(notes) for t in np.linspace(0, 24, NOTE_THICKNESS)] ] svs,",
"endX=n.offset - t, startY=-1 + en / 500 , endY=1 - en /",
"107487, 107692, 107896, 108099, 110674, 110867, 111059, 111156, 111252, 111348, 113698, 113882, 114065,",
"101018, 101245, 104124, 104340, 104556, 104770, 107487, 107692, 107896, 108099, 110674, 110867, 111059,",
"i in enumerate(SHAKES)], lambda x: 0]) + (x - (n.offset - t)) /",
"/ 500, funcs=[ lambda x, n=n, t=t: # This flips the board if",
"0.010 INC_SHAKE_AMP = 0.0010 SHAKE_WINDOW = 250 NOTE_DURATION = 2000 # noinspection PyTypeChecker",
"- t, startX=n.offset - NOTE_DURATION - t, endX=n.offset - t, startY=-1 + en",
"sorted([n for n in m.notes.hits() if 97742 < n.offset <= 125993]) BASE_SHAKE_AMP =",
"flips the board if it's < 2 (-1 if n.column < 2 else",
"116753, 116928, 117103, 119326, 119494, 119661, 119827, 121953, 122114, 122275, 122434, 122594, 122673,",
"119326, 119494, 119661, 119827, 121953, 122114, 122275, 122434, 122594, 122673, 122752, 122831, 123068,",
"113882, 114065, 114248, 116577, 116753, 116928, 117103, 119326, 119494, 119661, 119827, 121953, 122114,",
"124162, 124239, 124316, 124471, 124547, 124624, 124701, 124778, 124932, 125008, 125084, 125160, 125236,",
"for n in m.notes.hits() if 97742 < n.offset <= 125993]) BASE_SHAKE_AMP = 0.010",
"NOTE_DURATION ) ]) for en, n in enumerate(notes) for t in np.linspace(0, 24,",
"+ (x - (n.offset - t)) / NOTE_DURATION ) ]) for en, n",
"es, i in enumerate(SHAKES)], lambda x: 0]) + (x - (n.offset - t))",
"np.linspace(0, 24, NOTE_THICKNESS)] ] svs, bpms = svOsuMeasureLineMD(events, scalingFactor=SCALE, firstOffset=97742, lastOffset=125993, paddingSize=PADDING, endBpm=250)",
"+ en / 500 , endY=1 - en / 500, funcs=[ lambda x,",
"import * from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent from reamber.osu.OsuMap import OsuMap # notes:",
"123696, 123773, 123851, 124007, 124084, 124162, 124239, 124316, 124471, 124547, 124624, 124701, 124778,",
") ]) for en, n in enumerate(notes) for t in np.linspace(0, 24, NOTE_THICKNESS)]",
"en / 500 , endY=1 - en / 500, funcs=[ lambda x, n=n,",
"500 , endY=1 - en / 500, funcs=[ lambda x, n=n, t=t: #",
"* 3)) for es, i in enumerate(SHAKES)], lambda x: 0]) + (x -",
"- t, startY=-1 + en / 500 , endY=1 - en / 500,",
"en, n in enumerate(notes) for t in np.linspace(0, 24, NOTE_THICKNESS)] ] svs, bpms",
"from reamber.osu.OsuMap import OsuMap # notes: 01:37:742 (97742|2,125moves993|2) - SHAKES = np.array( [100560,",
"125160, 125236, 125388, 125464, 125540, 125616, 125692, 125767, 125842, 125918, 125993]) def f247(m:",
"125084, 125160, 125236, 125388, 125464, 125540, 125616, 125692, 125767, 125842, 125918, 125993]) def",
"m.notes.hits() if 97742 < n.offset <= 125993]) BASE_SHAKE_AMP = 0.010 INC_SHAKE_AMP = 0.0010",
"x, i=i, es=es: (BASE_SHAKE_AMP + es * INC_SHAKE_AMP) * np.sin((x - i) *",
"122275, 122434, 122594, 122673, 122752, 122831, 123068, 123147, 123226, 123304, 123383, 123539, 123618,",
"if it's < 2 (-1 if n.column < 2 else 1) * (",
"(BASE_SHAKE_AMP + es * INC_SHAKE_AMP) * np.sin((x - i) * pi / (SHAKE_WINDOW",
"119827, 121953, 122114, 122275, 122434, 122594, 122673, 122752, 122831, 123068, 123147, 123226, 123304,",
"- NOTE_DURATION - t, endX=n.offset - t, startY=-1 + en / 500 ,",
"t, startY=-1 + en / 500 , endY=1 - en / 500, funcs=[",
"= 2000 # noinspection PyTypeChecker events = [ *[SvOsuMeasureLineEvent( firstOffset=n.offset - NOTE_DURATION -",
"svOsuMeasureLineMD, SvOsuMeasureLineEvent from reamber.osu.OsuMap import OsuMap # notes: 01:37:742 (97742|2,125moves993|2) - SHAKES =",
"startY=-1 + en / 500 , endY=1 - en / 500, funcs=[ lambda",
"123383, 123539, 123618, 123696, 123773, 123851, 124007, 124084, 124162, 124239, 124316, 124471, 124547,",
"125767, 125842, 125918, 125993]) def f247(m: OsuMap): notes = sorted([n for n in",
"np.array( [100560, 100790, 101018, 101245, 104124, 104340, 104556, 104770, 107487, 107692, 107896, 108099,",
"125616, 125692, 125767, 125842, 125918, 125993]) def f247(m: OsuMap): notes = sorted([n for",
"lambda x: 0]) + (x - (n.offset - t)) / NOTE_DURATION ) ])",
"3)) for es, i in enumerate(SHAKES)], lambda x: 0]) + (x - (n.offset",
"104340, 104556, 104770, 107487, 107692, 107896, 108099, 110674, 110867, 111059, 111156, 111252, 111348,",
"it's < 2 (-1 if n.column < 2 else 1) * ( np.piecewise(x,",
"< n.offset <= 125993]) BASE_SHAKE_AMP = 0.010 INC_SHAKE_AMP = 0.0010 SHAKE_WINDOW = 250",
"125388, 125464, 125540, 125616, 125692, 125767, 125842, 125918, 125993]) def f247(m: OsuMap): notes",
"1) * ( np.piecewise(x, [(i <= x) & (x < i + SHAKE_WINDOW)",
"x: 0]) + (x - (n.offset - t)) / NOTE_DURATION ) ]) for",
"123147, 123226, 123304, 123383, 123539, 123618, 123696, 123773, 123851, 124007, 124084, 124162, 124239,",
"125464, 125540, 125616, 125692, 125767, 125842, 125918, 125993]) def f247(m: OsuMap): notes =",
"114248, 116577, 116753, 116928, 117103, 119326, 119494, 119661, 119827, 121953, 122114, 122275, 122434,",
"123618, 123696, 123773, 123851, 124007, 124084, 124162, 124239, 124316, 124471, 124547, 124624, 124701,",
"110674, 110867, 111059, 111156, 111252, 111348, 113698, 113882, 114065, 114248, 116577, 116753, 116928,",
"n.offset <= 125993]) BASE_SHAKE_AMP = 0.010 INC_SHAKE_AMP = 0.0010 SHAKE_WINDOW = 250 NOTE_DURATION",
"2 else 1) * ( np.piecewise(x, [(i <= x) & (x < i",
"<= x) & (x < i + SHAKE_WINDOW) for i in SHAKES], [*[lambda",
"* np.sin((x - i) * pi / (SHAKE_WINDOW - es * 3)) for",
"for es, i in enumerate(SHAKES)], lambda x: 0]) + (x - (n.offset -",
"for t in np.linspace(0, 24, NOTE_THICKNESS)] ] svs, bpms = svOsuMeasureLineMD(events, scalingFactor=SCALE, firstOffset=97742,",
"< 2 else 1) * ( np.piecewise(x, [(i <= x) & (x <",
"import svOsuMeasureLineMD, SvOsuMeasureLineEvent from reamber.osu.OsuMap import OsuMap # notes: 01:37:742 (97742|2,125moves993|2) - SHAKES",
"aleph.consts import * from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent from reamber.osu.OsuMap import OsuMap #",
"= [ *[SvOsuMeasureLineEvent( firstOffset=n.offset - NOTE_DURATION - t, lastOffset=n.offset - t, startX=n.offset -",
"np.piecewise(x, [(i <= x) & (x < i + SHAKE_WINDOW) for i in",
"100790, 101018, 101245, 104124, 104340, 104556, 104770, 107487, 107692, 107896, 108099, 110674, 110867,",
"n in enumerate(notes) for t in np.linspace(0, 24, NOTE_THICKNESS)] ] svs, bpms =",
"[ *[SvOsuMeasureLineEvent( firstOffset=n.offset - NOTE_DURATION - t, lastOffset=n.offset - t, startX=n.offset - NOTE_DURATION",
"noinspection PyTypeChecker events = [ *[SvOsuMeasureLineEvent( firstOffset=n.offset - NOTE_DURATION - t, lastOffset=n.offset -",
"(-1 if n.column < 2 else 1) * ( np.piecewise(x, [(i <= x)",
"for i in SHAKES], [*[lambda x, i=i, es=es: (BASE_SHAKE_AMP + es * INC_SHAKE_AMP)",
"notes: 01:37:742 (97742|2,125moves993|2) - SHAKES = np.array( [100560, 100790, 101018, 101245, 104124, 104340,",
"NOTE_THICKNESS)] ] svs, bpms = svOsuMeasureLineMD(events, scalingFactor=SCALE, firstOffset=97742, lastOffset=125993, paddingSize=PADDING, endBpm=250) m.svs.extend(svs) m.bpms.extend(bpms)",
"= np.array( [100560, 100790, 101018, 101245, 104124, 104340, 104556, 104770, 107487, 107692, 107896,",
"108099, 110674, 110867, 111059, 111156, 111252, 111348, 113698, 113882, 114065, 114248, 116577, 116753,",
"123851, 124007, 124084, 124162, 124239, 124316, 124471, 124547, 124624, 124701, 124778, 124932, 125008,",
"import pi import numpy as np from aleph.consts import * from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import",
"/ 500 , endY=1 - en / 500, funcs=[ lambda x, n=n, t=t:",
"+ SHAKE_WINDOW) for i in SHAKES], [*[lambda x, i=i, es=es: (BASE_SHAKE_AMP + es",
"( np.piecewise(x, [(i <= x) & (x < i + SHAKE_WINDOW) for i",
"enumerate(SHAKES)], lambda x: 0]) + (x - (n.offset - t)) / NOTE_DURATION )",
"* from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent from reamber.osu.OsuMap import OsuMap # notes: 01:37:742",
"INC_SHAKE_AMP = 0.0010 SHAKE_WINDOW = 250 NOTE_DURATION = 2000 # noinspection PyTypeChecker events",
"- i) * pi / (SHAKE_WINDOW - es * 3)) for es, i",
"the board if it's < 2 (-1 if n.column < 2 else 1)",
"122114, 122275, 122434, 122594, 122673, 122752, 122831, 123068, 123147, 123226, 123304, 123383, 123539,",
"114065, 114248, 116577, 116753, 116928, 117103, 119326, 119494, 119661, 119827, 121953, 122114, 122275,",
"t)) / NOTE_DURATION ) ]) for en, n in enumerate(notes) for t in",
"t in np.linspace(0, 24, NOTE_THICKNESS)] ] svs, bpms = svOsuMeasureLineMD(events, scalingFactor=SCALE, firstOffset=97742, lastOffset=125993,",
"107692, 107896, 108099, 110674, 110867, 111059, 111156, 111252, 111348, 113698, 113882, 114065, 114248,",
"2000 # noinspection PyTypeChecker events = [ *[SvOsuMeasureLineEvent( firstOffset=n.offset - NOTE_DURATION - t,",
"OsuMap): notes = sorted([n for n in m.notes.hits() if 97742 < n.offset <=",
"SvOsuMeasureLineEvent from reamber.osu.OsuMap import OsuMap # notes: 01:37:742 (97742|2,125moves993|2) - SHAKES = np.array(",
"111348, 113698, 113882, 114065, 114248, 116577, 116753, 116928, 117103, 119326, 119494, 119661, 119827,",
"reamber.osu.OsuMap import OsuMap # notes: 01:37:742 (97742|2,125moves993|2) - SHAKES = np.array( [100560, 100790,",
"if 97742 < n.offset <= 125993]) BASE_SHAKE_AMP = 0.010 INC_SHAKE_AMP = 0.0010 SHAKE_WINDOW",
"in enumerate(SHAKES)], lambda x: 0]) + (x - (n.offset - t)) / NOTE_DURATION",
"from aleph.consts import * from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent from reamber.osu.OsuMap import OsuMap",
"import numpy as np from aleph.consts import * from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent",
"125008, 125084, 125160, 125236, 125388, 125464, 125540, 125616, 125692, 125767, 125842, 125918, 125993])",
"in SHAKES], [*[lambda x, i=i, es=es: (BASE_SHAKE_AMP + es * INC_SHAKE_AMP) * np.sin((x",
"# This flips the board if it's < 2 (-1 if n.column <",
"01:37:742 (97742|2,125moves993|2) - SHAKES = np.array( [100560, 100790, 101018, 101245, 104124, 104340, 104556,",
"np from aleph.consts import * from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent from reamber.osu.OsuMap import",
"- (n.offset - t)) / NOTE_DURATION ) ]) for en, n in enumerate(notes)",
"SHAKE_WINDOW = 250 NOTE_DURATION = 2000 # noinspection PyTypeChecker events = [ *[SvOsuMeasureLineEvent(",
"lambda x, n=n, t=t: # This flips the board if it's < 2",
"This flips the board if it's < 2 (-1 if n.column < 2",
"117103, 119326, 119494, 119661, 119827, 121953, 122114, 122275, 122434, 122594, 122673, 122752, 122831,",
"122831, 123068, 123147, 123226, 123304, 123383, 123539, 123618, 123696, 123773, 123851, 124007, 124084,",
"= 0.0010 SHAKE_WINDOW = 250 NOTE_DURATION = 2000 # noinspection PyTypeChecker events =",
"endY=1 - en / 500, funcs=[ lambda x, n=n, t=t: # This flips",
"116577, 116753, 116928, 117103, 119326, 119494, 119661, 119827, 121953, 122114, 122275, 122434, 122594,",
"# notes: 01:37:742 (97742|2,125moves993|2) - SHAKES = np.array( [100560, 100790, 101018, 101245, 104124,",
"107896, 108099, 110674, 110867, 111059, 111156, 111252, 111348, 113698, 113882, 114065, 114248, 116577,",
"123226, 123304, 123383, 123539, 123618, 123696, 123773, 123851, 124007, 124084, 124162, 124239, 124316,",
"n in m.notes.hits() if 97742 < n.offset <= 125993]) BASE_SHAKE_AMP = 0.010 INC_SHAKE_AMP",
"x, n=n, t=t: # This flips the board if it's < 2 (-1",
"if n.column < 2 else 1) * ( np.piecewise(x, [(i <= x) &",
"[*[lambda x, i=i, es=es: (BASE_SHAKE_AMP + es * INC_SHAKE_AMP) * np.sin((x - i)",
"123304, 123383, 123539, 123618, 123696, 123773, 123851, 124007, 124084, 124162, 124239, 124316, 124471,",
"124471, 124547, 124624, 124701, 124778, 124932, 125008, 125084, 125160, 125236, 125388, 125464, 125540,",
"2 (-1 if n.column < 2 else 1) * ( np.piecewise(x, [(i <=",
"- SHAKES = np.array( [100560, 100790, 101018, 101245, 104124, 104340, 104556, 104770, 107487,",
"x) & (x < i + SHAKE_WINDOW) for i in SHAKES], [*[lambda x,",
"(x < i + SHAKE_WINDOW) for i in SHAKES], [*[lambda x, i=i, es=es:",
"111059, 111156, 111252, 111348, 113698, 113882, 114065, 114248, 116577, 116753, 116928, 117103, 119326,",
"*[SvOsuMeasureLineEvent( firstOffset=n.offset - NOTE_DURATION - t, lastOffset=n.offset - t, startX=n.offset - NOTE_DURATION -",
"= sorted([n for n in m.notes.hits() if 97742 < n.offset <= 125993]) BASE_SHAKE_AMP",
"500, funcs=[ lambda x, n=n, t=t: # This flips the board if it's",
"(97742|2,125moves993|2) - SHAKES = np.array( [100560, 100790, 101018, 101245, 104124, 104340, 104556, 104770,",
"250 NOTE_DURATION = 2000 # noinspection PyTypeChecker events = [ *[SvOsuMeasureLineEvent( firstOffset=n.offset -",
"* pi / (SHAKE_WINDOW - es * 3)) for es, i in enumerate(SHAKES)],",
"in np.linspace(0, 24, NOTE_THICKNESS)] ] svs, bpms = svOsuMeasureLineMD(events, scalingFactor=SCALE, firstOffset=97742, lastOffset=125993, paddingSize=PADDING,",
"119494, 119661, 119827, 121953, 122114, 122275, 122434, 122594, 122673, 122752, 122831, 123068, 123147,",
"NOTE_DURATION = 2000 # noinspection PyTypeChecker events = [ *[SvOsuMeasureLineEvent( firstOffset=n.offset - NOTE_DURATION",
"import OsuMap # notes: 01:37:742 (97742|2,125moves993|2) - SHAKES = np.array( [100560, 100790, 101018,",
"f247(m: OsuMap): notes = sorted([n for n in m.notes.hits() if 97742 < n.offset",
"[(i <= x) & (x < i + SHAKE_WINDOW) for i in SHAKES],",
"en / 500, funcs=[ lambda x, n=n, t=t: # This flips the board",
"101245, 104124, 104340, 104556, 104770, 107487, 107692, 107896, 108099, 110674, 110867, 111059, 111156,",
"- en / 500, funcs=[ lambda x, n=n, t=t: # This flips the",
"124007, 124084, 124162, 124239, 124316, 124471, 124547, 124624, 124701, 124778, 124932, 125008, 125084,",
"(n.offset - t)) / NOTE_DURATION ) ]) for en, n in enumerate(notes) for",
"< 2 (-1 if n.column < 2 else 1) * ( np.piecewise(x, [(i",
"97742 < n.offset <= 125993]) BASE_SHAKE_AMP = 0.010 INC_SHAKE_AMP = 0.0010 SHAKE_WINDOW =",
"n.column < 2 else 1) * ( np.piecewise(x, [(i <= x) & (x",
"124624, 124701, 124778, 124932, 125008, 125084, 125160, 125236, 125388, 125464, 125540, 125616, 125692,",
"124239, 124316, 124471, 124547, 124624, 124701, 124778, 124932, 125008, 125084, 125160, 125236, 125388,",
"& (x < i + SHAKE_WINDOW) for i in SHAKES], [*[lambda x, i=i,",
"t=t: # This flips the board if it's < 2 (-1 if n.column",
"0]) + (x - (n.offset - t)) / NOTE_DURATION ) ]) for en,",
"123068, 123147, 123226, 123304, 123383, 123539, 123618, 123696, 123773, 123851, 124007, 124084, 124162,"
] |
[
"FOUND: \" + f # Alter the level alterLevelBy = abs(numLevels) pre =",
"KIND, either express or implied. # See the License for the specific language",
"Unless required by applicable law or agreed to in writing, software # distributed",
"at a maximum of 6 or minimum of 0\\n\") alterLevelBy = 5 p",
"\"\\n\") return Header(level, [REFFILE + str(ids), [], []], inline) elif key == 'CodeBlock':",
"in stderr: \" + str(stderr)) return stdout.decode() def get_contents_of_file(f, levels=u\"0\"): numLevels = int(levels)",
"in meta: reffile = re.sub(\"\\.md\", \".html\", meta['fromfile']['c']) REFFILE=\"~~\" + reffile + \"~~\" sys.stderr.write(reffile",
"re from subprocess import Popen, PIPE from pandocfilters import toJSONFilter, walk, get_value, Str,",
"subprocess import Popen, PIPE from pandocfilters import toJSONFilter, walk, get_value, Str, Header, Para,",
"while building a document, it outputs a document # map on stderr so",
"had results in stderr: \" + str(stderr)) return stdout.decode() def get_contents_of_file(f, levels=u\"0\"): numLevels",
"# The following is equivalent to: # # flattened = [] # for",
"REFFILE=\"~~\" + reffile + \"~~\" sys.stderr.write(reffile + \"\\n\") return Header(level, [REFFILE + str(ids),",
"# for item in sublist: # flattened.append(item) # return flattened return [item for",
"of bounds. Will stick at a maximum of 6 or minimum of 0\\n\")",
"this file except in compliance with the License. # You may obtain a",
"(stdout, stderr) = p.communicate() if str(stderr) != \"None\": sys.stderr.write(\"WARNING: Conversion to json had",
"on stderr so that a script can figure out where each part of",
"== 'CodeBlock': [[ident, classes, keyvals], code] = value if \"include\" in classes: rv",
"+ str(ids), [], []], inline) elif key == 'CodeBlock': [[ident, classes, keyvals], code]",
"classes, keyvals] = attr # Change the reference file if we see a",
"a document # map on stderr so that a script can figure out",
"sys.stderr.write(\"WARNING: Header change out of bounds. Will stick at a maximum of 6",
"+ \"\\n\") return Header(level, [REFFILE + str(ids), [], []], inline) elif key ==",
"ANY KIND, either express or implied. # See the License for the specific",
"file if we see a new level-1 header if level == 1 and",
"format, meta): global REFFILE if key == 'Header': [level, attr, inline] = value",
"\"~~\" sys.stderr.write(reffile + \"\\n\") return Header(level, [REFFILE + str(ids), [], []], inline) elif",
"to: # # flattened = [] # for sublist in rv: # for",
"os import sys import json import re from subprocess import Popen, PIPE from",
"classes, keyvals], code] = value if \"include\" in classes: rv = [] for",
"filter is identical to the include filter, except # that while building a",
"if numLevels > 0 else \"de\" if alterLevelBy > 5: sys.stderr.write(\"WARNING: Header change",
"0: if os.path.isfile(f): with open(f, \"r\") as myFile: return myFile.read() else: sys.stderr.write(\"WARNING: cannot",
"level is needed if numLevels == 0: if os.path.isfile(f): with open(f, \"r\") as",
"value if \"include\" in classes: rv = [] for l in code.splitlines(): l",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See",
"format, meta) rv.append(altered['blocks']) else: sys.stderr.write(\"WARNING: Can't read file '\" + l + \"'.",
"= attr # Change the reference file if we see a new level-1",
"AT&T Intellectual Property. All rights reserved. # # Licensed under the Apache License,",
"[] # for sublist in rv: # for item in sublist: # flattened.append(item)",
"reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"[]], inline) elif key == 'CodeBlock': [[ident, classes, keyvals], code] = value if",
"def get_contents_of_file(f, levels=u\"0\"): numLevels = int(levels) # Return the contents of file unchanged",
"OF ANY KIND, either express or implied. # See the License for the",
"the reference file if we see a new level-1 header if level ==",
"\"de\" if alterLevelBy > 5: sys.stderr.write(\"WARNING: Header change out of bounds. Will stick",
"elif doc[0]: # old API meta = doc[0]['unMeta'] else: meta = {} #",
"\" + stderr) return stdout def docmap(key, value, format, meta): global REFFILE if",
"level == 1 and 'fromfile' in meta: reffile = re.sub(\"\\.md\", \".html\", meta['fromfile']['c']) REFFILE=\"~~\"",
"u't':'MetaString'} altered = walk(doc, docmap, format, meta) rv.append(altered['blocks']) else: sys.stderr.write(\"WARNING: Can't read file",
"a document, it outputs a document # map on stderr so that a",
"\"markdown\", \"-F\", \"flt-\" + pre + \"crement-header-\" + str(alterLevelBy) + \".py\", f], stdout=PIPE)",
"sys.stderr.write(\"WARNING: Conversion to json had results in stderr: \" + stderr) return stdout",
"python3 # Copyright (c) 2017 AT&T Intellectual Property. All rights reserved. # #",
"unchanged if no change in level is needed if numLevels == 0: if",
"where each part of # the document came from. E.g. # # ```include",
def md_to_json(s):
    """Convert a markdown string to pandoc's JSON AST, returned as a JSON string.

    Runs ``pandoc -f markdown -t json`` in a subprocess, feeding *s* on
    stdin and returning the decoded stdout.  Anything pandoc prints on
    stderr is reported as a warning on our own stderr.
    """
    p = Popen(["pandoc", "-f", "markdown", "-t", "json"],
              stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # BUG FIX: the original wrote to p.stdin and then called communicate()
    # with no input, which can deadlock once the OS pipe buffer fills.
    # communicate(input=...) feeds stdin and drains stdout concurrently.
    (stdout, stderr) = p.communicate(s.encode())
    # BUG FIX: stderr was not piped before, so it was always None and the
    # old `str(stderr) != "None"` test could never report anything useful.
    if stderr:
        sys.stderr.write("WARNING: Conversion to json had results in stderr: "
                         + stderr.decode())
    return stdout.decode()
"includethisfile.md # ``` # This filter is recursive, so your markdown can include",
def get_contents_of_file(f, levels=u"0"):
    """Return the markdown contents of file *f*, optionally shifting headers.

    :param f: path to a markdown file.
    :param levels: number of header levels to shift, as a string or int.
        0 returns the file verbatim; positive values increment headers,
        negative values decrement them (clamped to a shift of 5).
    :returns: the (possibly header-shifted) markdown text, or the sentinel
        string ``"FILE NOT FOUND: <f>"`` when the file cannot be read.
    """
    numLevels = int(levels)
    # Return the contents of file unchanged if no change in level is needed
    if numLevels == 0:
        if os.path.isfile(f):
            with open(f, "r") as myFile:
                return myFile.read()
        sys.stderr.write("WARNING: cannot read " + f)
        return "FILE NOT FOUND: " + f
    # Alter the level by piping the file through the matching
    # flt-{in,de}crement-header-N.py pandoc filter.
    alterLevelBy = abs(numLevels)
    pre = "in" if numLevels > 0 else "de"
    if alterLevelBy > 5:
        sys.stderr.write("WARNING: Header change out of bounds. Will stick at a maximum of 6 or minimum of 0\n")
        alterLevelBy = 5
    p = Popen(["pandoc", "-f", "markdown", "-t", "markdown",
               "-F", "flt-" + pre + "crement-header-" + str(alterLevelBy) + ".py", f],
              stdout=PIPE, stderr=PIPE)
    (stdout, stderr) = p.communicate()
    # BUG FIX: stderr was not piped before, so it was always None; the old
    # `if stderr != "None"` branch then fired and crashed concatenating
    # None into the warning string.  Pipe it and test for real content.
    # (The message also wrongly said "to json" for a markdown conversion.)
    if stderr:
        sys.stderr.write("WARNING: Conversion to markdown had results in stderr: "
                         + stderr.decode())
    return stdout.decode()
def docmap(key, value, format, meta):
    """Pandoc filter action: expand ``include`` code blocks, emitting a doc map.

    Header nodes get their identifier prefixed with ``~~<reffile>~~`` so a
    post-processing script can tell which source file each section came
    from; the reference file is also written to stderr whenever a new
    level-1 header is seen.  CodeBlock nodes with the ``include`` class are
    replaced by the (recursively filtered) blocks of each listed file.

    :param key: pandoc AST node type.
    :param value: node payload (shape depends on *key*).
    :param format: output format name, passed through to ``walk``.
    :param meta: document metadata; ``meta['fromfile']`` tracks provenance.
    :returns: a replacement node/list, or None to keep the node unchanged.
    """
    global REFFILE
    if key == 'Header':
        [level, attr, inline] = value
        [ids, classes, keyvals] = attr
        # Change the reference file if we see a new level-1 header
        if level == 1 and 'fromfile' in meta:
            reffile = re.sub(r"\.md", ".html", meta['fromfile']['c'])
            REFFILE = "~~" + reffile + "~~"
            sys.stderr.write(reffile + "\n")
        return Header(level, [REFFILE + str(ids), [], []], inline)
    elif key == 'CodeBlock':
        [[ident, classes, keyvals], code] = value
        if "include" in classes:
            rv = []
            for l in code.splitlines():
                l = l.strip()
                if os.path.isfile(l):
                    (headingLevel, dummy) = get_value(keyvals, "heading-level")
                    if not headingLevel:
                        headingLevel = 0
                    contents = get_contents_of_file(l, headingLevel)
                    doc = json.loads(md_to_json(contents))
                    if 'meta' in doc:
                        meta = doc['meta']
                    elif doc[0]:  # old API
                        meta = doc[0]['unMeta']
                    else:
                        meta = {}
                    # Add a file to the meta info so nested headers can
                    # report which file they came from.
                    meta['fromfile'] = {u'c': l, u't': 'MetaString'}
                    altered = walk(doc, docmap, format, meta)
                    # BUG FIX: the original read altered['blocks']
                    # unconditionally, which raises TypeError on the old
                    # list-shaped API that the meta extraction above
                    # explicitly supports.  Old-API documents are
                    # [meta, blocks] lists.
                    if isinstance(altered, dict):
                        rv.append(altered['blocks'])
                    else:
                        rv.append(altered[1])
                else:
                    sys.stderr.write("WARNING: Can't read file '" + l + "'. Skipping.")
            # Return a flattened list using nested list comprehension:
            # one list of blocks built from the per-file block lists.
            return [item for sublist in rv for item in sublist]
def main():
    # Entry point: run docmap as a pandoc JSON filter over stdin/stdout.
    toJSONFilter(docmap)


if __name__ == "__main__":
    main()
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"filter, except # that while building a document, it outputs a document #",
"[], []], inline) elif key == 'CodeBlock': [[ident, classes, keyvals], code] = value",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"= p.communicate() stdout = stdout.decode() if stderr is not None: stderr = stderr.decode()",
"doc = json.loads(md_to_json(contents)) if 'meta' in doc: meta = doc['meta'] elif doc[0]: #",
"json had results in stderr: \" + str(stderr)) return stdout.decode() def get_contents_of_file(f, levels=u\"0\"):",
"the specific language governing permissions and # limitations under the License. ## #",
"!= \"None\": sys.stderr.write(\"WARNING: Conversion to json had results in stderr: \" + stderr)",
"else: sys.stderr.write(\"WARNING: Can't read file '\" + l + \"'. Skipping.\") # Return",
"sys.stderr.write(\"WARNING: cannot read \" + f) return \"FILE NOT FOUND: \" + f",
"include # other markdown to any level. # import os import sys import",
"See the License for the specific language governing permissions and # limitations under",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"Space REFFILE=\"\" def md_to_json(s): p = Popen([\"pandoc\", \"-f\", \"markdown\", \"-t\", \"json\"], stdin=PIPE, stdout=PIPE)",
"identical to the include filter, except # that while building a document, it",
"[item for sublist in rv for item in sublist] if __name__ == \"__main__\":",
"except # that while building a document, it outputs a document # map",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"flattened = [] # for sublist in rv: # for item in sublist:",
"f # Alter the level alterLevelBy = abs(numLevels) pre = \"in\" if numLevels",
"if we see a new level-1 header if level == 1 and 'fromfile'",
"item in sublist: # flattened.append(item) # return flattened return [item for sublist in",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"level-1 header if level == 1 and 'fromfile' in meta: reffile = re.sub(\"\\.md\",",
"sublist: # flattened.append(item) # return flattened return [item for sublist in rv for",
"``` # This filter is recursive, so you markdown can include # other",
"This filter is recursive, so you markdown can include # other markdown to",
"f) return \"FILE NOT FOUND: \" + f # Alter the level alterLevelBy",
"+ \"crement-header-\" + str(alterLevelBy) + \".py\", f], stdout=PIPE) (stdout, stderr) = p.communicate() stdout",
"# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved. # # Licensed",
"nested list comprehension # # The following is equivalent to: # # flattened",
"meta = doc['meta'] elif doc[0]: # old API meta = doc[0]['unMeta'] else: meta",
"# Alter the level alterLevelBy = abs(numLevels) pre = \"in\" if numLevels >",
"so that a script can figure out where each part of # the",
"include filter, except # that while building a document, it outputs a document",
"get_contents_of_file(f, levels=u\"0\"): numLevels = int(levels) # Return the contents of file unchanged if",
"Conversion to json had results in stderr: \" + str(stderr)) return stdout.decode() def",
"+ \".py\", f], stdout=PIPE) (stdout, stderr) = p.communicate() stdout = stdout.decode() if stderr",
"'\" + l + \"'. Skipping.\") # Return a flattened list using nested",
"\"r\") as myFile: return myFile.read() else: sys.stderr.write(\"WARNING: cannot read \" + f) return",
"= abs(numLevels) pre = \"in\" if numLevels > 0 else \"de\" if alterLevelBy",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"except in compliance with the License. # You may obtain a copy of",
"> 5: sys.stderr.write(\"WARNING: Header change out of bounds. Will stick at a maximum",
"Property. All rights reserved. # # Licensed under the Apache License, Version 2.0",
"+ stderr) return stdout def docmap(key, value, format, meta): global REFFILE if key",
"Can't read file '\" + l + \"'. Skipping.\") # Return a flattened",
"cannot read \" + f) return \"FILE NOT FOUND: \" + f #",
"\"include\" in classes: rv = [] for l in code.splitlines(): l = l.strip()",
"# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"Change the reference file if we see a new level-1 header if level",
"attr, inline] = value [ids, classes, keyvals] = attr # Change the reference",
"doc: meta = doc['meta'] elif doc[0]: # old API meta = doc[0]['unMeta'] else:",
"return flattened return [item for sublist in rv for item in sublist] if",
"reference file if we see a new level-1 header if level == 1",
"'meta' in doc: meta = doc['meta'] elif doc[0]: # old API meta =",
"stderr.decode() if stderr != \"None\": sys.stderr.write(\"WARNING: Conversion to json had results in stderr:",
"level alterLevelBy = abs(numLevels) pre = \"in\" if numLevels > 0 else \"de\"",
"myFile.read() else: sys.stderr.write(\"WARNING: cannot read \" + f) return \"FILE NOT FOUND: \"",
"permissions and # limitations under the License. ## # This filter is identical",
"sys import json import re from subprocess import Popen, PIPE from pandocfilters import",
"from pandocfilters import toJSONFilter, walk, get_value, Str, Header, Para, Space REFFILE=\"\" def md_to_json(s):",
"\" + f) return \"FILE NOT FOUND: \" + f # Alter the",
"out of bounds. Will stick at a maximum of 6 or minimum of",
"REFFILE if key == 'Header': [level, attr, inline] = value [ids, classes, keyvals]",
"\".html\", meta['fromfile']['c']) REFFILE=\"~~\" + reffile + \"~~\" sys.stderr.write(reffile + \"\\n\") return Header(level, [REFFILE",
"\"-t\", \"markdown\", \"-F\", \"flt-\" + pre + \"crement-header-\" + str(alterLevelBy) + \".py\", f],",
"read file '\" + l + \"'. Skipping.\") # Return a flattened list",
"import re from subprocess import Popen, PIPE from pandocfilters import toJSONFilter, walk, get_value,",
"Header(level, [REFFILE + str(ids), [], []], inline) elif key == 'CodeBlock': [[ident, classes,",
"# Add a file to the meta info meta['fromfile']= {u'c':l, u't':'MetaString'} altered =",
"meta = {} # Add a file to the meta info meta['fromfile']= {u'c':l,",
"meta['fromfile']= {u'c':l, u't':'MetaString'} altered = walk(doc, docmap, format, meta) rv.append(altered['blocks']) else: sys.stderr.write(\"WARNING: Can't",
"def docmap(key, value, format, meta): global REFFILE if key == 'Header': [level, attr,",
"markdown can include # other markdown to any level. # import os import",
"key == 'CodeBlock': [[ident, classes, keyvals], code] = value if \"include\" in classes:",
"had results in stderr: \" + stderr) return stdout def docmap(key, value, format,",
"document came from. E.g. # # ```include # includethisfile.md # ``` # This",
"# for sublist in rv: # for item in sublist: # flattened.append(item) #",
"Popen([\"pandoc\", \"-f\", \"markdown\", \"-t\", \"markdown\", \"-F\", \"flt-\" + pre + \"crement-header-\" + str(alterLevelBy)",
"str(stderr) != \"None\": sys.stderr.write(\"WARNING: Conversion to json had results in stderr: \" +",
"\"-F\", \"flt-\" + pre + \"crement-header-\" + str(alterLevelBy) + \".py\", f], stdout=PIPE) (stdout,",
"# import os import sys import json import re from subprocess import Popen,",
"REFFILE=\"\" def md_to_json(s): p = Popen([\"pandoc\", \"-f\", \"markdown\", \"-t\", \"json\"], stdin=PIPE, stdout=PIPE) p.stdin.write(s.encode())",
"came from. E.g. # # ```include # includethisfile.md # ``` # This filter",
"\"-f\", \"markdown\", \"-t\", \"markdown\", \"-F\", \"flt-\" + pre + \"crement-header-\" + str(alterLevelBy) +",
"# This filter is identical to the include filter, except # that while",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"under the License. ## # This filter is identical to the include filter,",
"that a script can figure out where each part of # the document",
"stderr != \"None\": sys.stderr.write(\"WARNING: Conversion to json had results in stderr: \" +",
"+ l + \"'. Skipping.\") # Return a flattened list using nested list",
"stderr) = p.communicate() stdout = stdout.decode() if stderr is not None: stderr =",
"+ reffile + \"~~\" sys.stderr.write(reffile + \"\\n\") return Header(level, [REFFILE + str(ids), [],",
"# Return the contents of file unchanged if no change in level is",
"see a new level-1 header if level == 1 and 'fromfile' in meta:",
"Add a file to the meta info meta['fromfile']= {u'c':l, u't':'MetaString'} altered = walk(doc,",
"# includethisfile.md # ``` # This filter is recursive, so you markdown can"
] |
[
"command help) * MUST provide a `setup_parser()` function * MUST provide a `command()`",
"config.plugins.extension self.settings = self.type_settings.get(self.name()) @classmethod def name(cls): \"\"\"Return the name of the Command",
"expose subcommand, the plugin module.. * MUST have a module-level docstring (used as",
"MUST provide a `register_plugin()` function * SHOULD have a module-level attribute `command_behavior` For",
"`command_behavior` For example, a plugin named 'foo' and this is the `foo.py`: '''The",
"rez.config import config class Command(object): \"\"\"An interface for registering custom Rez subcommand To",
"CommandFoo(Command): schema_dict = {} @classmethod def name(cls): return \"foo\" def register_plugin(): return CommandFoo",
"# optional: None, \"passthrough\", \"grouped\" } def setup_parser(parser, completions=False): parser.add_argument(\"--hello\", ...) def command(opts,",
"have a module-level docstring (used as the command help) * MUST provide a",
"= { \"hidden\": False, # optional: bool \"arg_mode\": None, # optional: None, \"passthrough\",",
"Project from rez.config import config class Command(object): \"\"\"An interface for registering custom Rez",
"''' from rez.command import Command command_behavior = { \"hidden\": False, # optional: bool",
"False, # optional: bool \"arg_mode\": None, # optional: None, \"passthrough\", \"grouped\" } def",
"if opts.hello: print(\"world\") class CommandFoo(Command): schema_dict = {} @classmethod def name(cls): return \"foo\"",
"Rez Project from rez.config import config class Command(object): \"\"\"An interface for registering custom",
"Copyright Contributors to the Rez Project from rez.config import config class Command(object): \"\"\"An",
"def name(cls): return \"foo\" def register_plugin(): return CommandFoo \"\"\" def __init__(self): self.type_settings =",
"module.. * MUST have a module-level docstring (used as the command help) *",
"subcommand To register plugin and expose subcommand, the plugin module.. * MUST have",
"is the `foo.py`: '''The docstring for command help, this is required. ''' from",
"class Command(object): \"\"\"An interface for registering custom Rez subcommand To register plugin and",
"\"\"\"An interface for registering custom Rez subcommand To register plugin and expose subcommand,",
"for command help, this is required. ''' from rez.command import Command command_behavior =",
"docstring (used as the command help) * MUST provide a `setup_parser()` function *",
"import config class Command(object): \"\"\"An interface for registering custom Rez subcommand To register",
"subcommand, the plugin module.. * MUST have a module-level docstring (used as the",
"as the command help) * MUST provide a `setup_parser()` function * MUST provide",
"the `foo.py`: '''The docstring for command help, this is required. ''' from rez.command",
"optional: bool \"arg_mode\": None, # optional: None, \"passthrough\", \"grouped\" } def setup_parser(parser, completions=False):",
"@classmethod def name(cls): \"\"\"Return the name of the Command and rez-subcommand.\"\"\" raise NotImplementedError",
"from rez.config import config class Command(object): \"\"\"An interface for registering custom Rez subcommand",
"parser=None, extra_arg_groups=None): if opts.hello: print(\"world\") class CommandFoo(Command): schema_dict = {} @classmethod def name(cls):",
"Rez subcommand To register plugin and expose subcommand, the plugin module.. * MUST",
"...) def command(opts, parser=None, extra_arg_groups=None): if opts.hello: print(\"world\") class CommandFoo(Command): schema_dict = {}",
"# SPDX-License-Identifier: Apache-2.0 # Copyright Contributors to the Rez Project from rez.config import",
"a module-level attribute `command_behavior` For example, a plugin named 'foo' and this is",
"and expose subcommand, the plugin module.. * MUST have a module-level docstring (used",
"opts.hello: print(\"world\") class CommandFoo(Command): schema_dict = {} @classmethod def name(cls): return \"foo\" def",
"have a module-level attribute `command_behavior` For example, a plugin named 'foo' and this",
"example, a plugin named 'foo' and this is the `foo.py`: '''The docstring for",
"plugin and expose subcommand, the plugin module.. * MUST have a module-level docstring",
"def setup_parser(parser, completions=False): parser.add_argument(\"--hello\", ...) def command(opts, parser=None, extra_arg_groups=None): if opts.hello: print(\"world\") class",
"named 'foo' and this is the `foo.py`: '''The docstring for command help, this",
"optional: None, \"passthrough\", \"grouped\" } def setup_parser(parser, completions=False): parser.add_argument(\"--hello\", ...) def command(opts, parser=None,",
"return \"foo\" def register_plugin(): return CommandFoo \"\"\" def __init__(self): self.type_settings = config.plugins.extension self.settings",
"* MUST provide a `command()` function * MUST provide a `register_plugin()` function *",
"registering custom Rez subcommand To register plugin and expose subcommand, the plugin module..",
"completions=False): parser.add_argument(\"--hello\", ...) def command(opts, parser=None, extra_arg_groups=None): if opts.hello: print(\"world\") class CommandFoo(Command): schema_dict",
"custom Rez subcommand To register plugin and expose subcommand, the plugin module.. *",
"from rez.command import Command command_behavior = { \"hidden\": False, # optional: bool \"arg_mode\":",
"interface for registering custom Rez subcommand To register plugin and expose subcommand, the",
"* MUST provide a `setup_parser()` function * MUST provide a `command()` function *",
"Command(object): \"\"\"An interface for registering custom Rez subcommand To register plugin and expose",
"= self.type_settings.get(self.name()) @classmethod def name(cls): \"\"\"Return the name of the Command and rez-subcommand.\"\"\"",
"{} @classmethod def name(cls): return \"foo\" def register_plugin(): return CommandFoo \"\"\" def __init__(self):",
"for registering custom Rez subcommand To register plugin and expose subcommand, the plugin",
"None, # optional: None, \"passthrough\", \"grouped\" } def setup_parser(parser, completions=False): parser.add_argument(\"--hello\", ...) def",
"To register plugin and expose subcommand, the plugin module.. * MUST have a",
"'''The docstring for command help, this is required. ''' from rez.command import Command",
"None, \"passthrough\", \"grouped\" } def setup_parser(parser, completions=False): parser.add_argument(\"--hello\", ...) def command(opts, parser=None, extra_arg_groups=None):",
"rez.command import Command command_behavior = { \"hidden\": False, # optional: bool \"arg_mode\": None,",
"def __init__(self): self.type_settings = config.plugins.extension self.settings = self.type_settings.get(self.name()) @classmethod def name(cls): \"\"\"Return the",
"the Rez Project from rez.config import config class Command(object): \"\"\"An interface for registering",
"module-level docstring (used as the command help) * MUST provide a `setup_parser()` function",
"provide a `register_plugin()` function * SHOULD have a module-level attribute `command_behavior` For example,",
"is required. ''' from rez.command import Command command_behavior = { \"hidden\": False, #",
"MUST provide a `command()` function * MUST provide a `register_plugin()` function * SHOULD",
"the command help) * MUST provide a `setup_parser()` function * MUST provide a",
"command help, this is required. ''' from rez.command import Command command_behavior = {",
"a `command()` function * MUST provide a `register_plugin()` function * SHOULD have a",
"* SHOULD have a module-level attribute `command_behavior` For example, a plugin named 'foo'",
"help) * MUST provide a `setup_parser()` function * MUST provide a `command()` function",
"a `register_plugin()` function * SHOULD have a module-level attribute `command_behavior` For example, a",
"the plugin module.. * MUST have a module-level docstring (used as the command",
"# optional: bool \"arg_mode\": None, # optional: None, \"passthrough\", \"grouped\" } def setup_parser(parser,",
"config class Command(object): \"\"\"An interface for registering custom Rez subcommand To register plugin",
"bool \"arg_mode\": None, # optional: None, \"passthrough\", \"grouped\" } def setup_parser(parser, completions=False): parser.add_argument(\"--hello\",",
"schema_dict = {} @classmethod def name(cls): return \"foo\" def register_plugin(): return CommandFoo \"\"\"",
"print(\"world\") class CommandFoo(Command): schema_dict = {} @classmethod def name(cls): return \"foo\" def register_plugin():",
"register_plugin(): return CommandFoo \"\"\" def __init__(self): self.type_settings = config.plugins.extension self.settings = self.type_settings.get(self.name()) @classmethod",
"'foo' and this is the `foo.py`: '''The docstring for command help, this is",
"function * MUST provide a `register_plugin()` function * SHOULD have a module-level attribute",
"plugin named 'foo' and this is the `foo.py`: '''The docstring for command help,",
"SHOULD have a module-level attribute `command_behavior` For example, a plugin named 'foo' and",
"register plugin and expose subcommand, the plugin module.. * MUST have a module-level",
"Apache-2.0 # Copyright Contributors to the Rez Project from rez.config import config class",
"self.settings = self.type_settings.get(self.name()) @classmethod def name(cls): \"\"\"Return the name of the Command and",
"MUST have a module-level docstring (used as the command help) * MUST provide",
"For example, a plugin named 'foo' and this is the `foo.py`: '''The docstring",
"\"foo\" def register_plugin(): return CommandFoo \"\"\" def __init__(self): self.type_settings = config.plugins.extension self.settings =",
"a plugin named 'foo' and this is the `foo.py`: '''The docstring for command",
"function * MUST provide a `command()` function * MUST provide a `register_plugin()` function",
"`command()` function * MUST provide a `register_plugin()` function * SHOULD have a module-level",
"required. ''' from rez.command import Command command_behavior = { \"hidden\": False, # optional:",
"function * SHOULD have a module-level attribute `command_behavior` For example, a plugin named",
"\"hidden\": False, # optional: bool \"arg_mode\": None, # optional: None, \"passthrough\", \"grouped\" }",
"return CommandFoo \"\"\" def __init__(self): self.type_settings = config.plugins.extension self.settings = self.type_settings.get(self.name()) @classmethod def",
"def command(opts, parser=None, extra_arg_groups=None): if opts.hello: print(\"world\") class CommandFoo(Command): schema_dict = {} @classmethod",
"extra_arg_groups=None): if opts.hello: print(\"world\") class CommandFoo(Command): schema_dict = {} @classmethod def name(cls): return",
"def register_plugin(): return CommandFoo \"\"\" def __init__(self): self.type_settings = config.plugins.extension self.settings = self.type_settings.get(self.name())",
"plugin module.. * MUST have a module-level docstring (used as the command help)",
"__init__(self): self.type_settings = config.plugins.extension self.settings = self.type_settings.get(self.name()) @classmethod def name(cls): \"\"\"Return the name",
"CommandFoo \"\"\" def __init__(self): self.type_settings = config.plugins.extension self.settings = self.type_settings.get(self.name()) @classmethod def name(cls):",
"self.type_settings = config.plugins.extension self.settings = self.type_settings.get(self.name()) @classmethod def name(cls): \"\"\"Return the name of",
"a module-level docstring (used as the command help) * MUST provide a `setup_parser()`",
"Contributors to the Rez Project from rez.config import config class Command(object): \"\"\"An interface",
"* MUST provide a `register_plugin()` function * SHOULD have a module-level attribute `command_behavior`",
"command(opts, parser=None, extra_arg_groups=None): if opts.hello: print(\"world\") class CommandFoo(Command): schema_dict = {} @classmethod def",
"`foo.py`: '''The docstring for command help, this is required. ''' from rez.command import",
"\"\"\" def __init__(self): self.type_settings = config.plugins.extension self.settings = self.type_settings.get(self.name()) @classmethod def name(cls): \"\"\"Return",
"(used as the command help) * MUST provide a `setup_parser()` function * MUST",
"provide a `command()` function * MUST provide a `register_plugin()` function * SHOULD have",
"`register_plugin()` function * SHOULD have a module-level attribute `command_behavior` For example, a plugin",
"class CommandFoo(Command): schema_dict = {} @classmethod def name(cls): return \"foo\" def register_plugin(): return",
"= {} @classmethod def name(cls): return \"foo\" def register_plugin(): return CommandFoo \"\"\" def",
"this is the `foo.py`: '''The docstring for command help, this is required. '''",
"provide a `setup_parser()` function * MUST provide a `command()` function * MUST provide",
"@classmethod def name(cls): return \"foo\" def register_plugin(): return CommandFoo \"\"\" def __init__(self): self.type_settings",
"} def setup_parser(parser, completions=False): parser.add_argument(\"--hello\", ...) def command(opts, parser=None, extra_arg_groups=None): if opts.hello: print(\"world\")",
"* MUST have a module-level docstring (used as the command help) * MUST",
"Command command_behavior = { \"hidden\": False, # optional: bool \"arg_mode\": None, # optional:",
"this is required. ''' from rez.command import Command command_behavior = { \"hidden\": False,",
"= config.plugins.extension self.settings = self.type_settings.get(self.name()) @classmethod def name(cls): \"\"\"Return the name of the",
"to the Rez Project from rez.config import config class Command(object): \"\"\"An interface for",
"command_behavior = { \"hidden\": False, # optional: bool \"arg_mode\": None, # optional: None,",
"\"grouped\" } def setup_parser(parser, completions=False): parser.add_argument(\"--hello\", ...) def command(opts, parser=None, extra_arg_groups=None): if opts.hello:",
"help, this is required. ''' from rez.command import Command command_behavior = { \"hidden\":",
"parser.add_argument(\"--hello\", ...) def command(opts, parser=None, extra_arg_groups=None): if opts.hello: print(\"world\") class CommandFoo(Command): schema_dict =",
"and this is the `foo.py`: '''The docstring for command help, this is required.",
"setup_parser(parser, completions=False): parser.add_argument(\"--hello\", ...) def command(opts, parser=None, extra_arg_groups=None): if opts.hello: print(\"world\") class CommandFoo(Command):",
"module-level attribute `command_behavior` For example, a plugin named 'foo' and this is the",
"name(cls): return \"foo\" def register_plugin(): return CommandFoo \"\"\" def __init__(self): self.type_settings = config.plugins.extension",
"# Copyright Contributors to the Rez Project from rez.config import config class Command(object):",
"\"arg_mode\": None, # optional: None, \"passthrough\", \"grouped\" } def setup_parser(parser, completions=False): parser.add_argument(\"--hello\", ...)",
"{ \"hidden\": False, # optional: bool \"arg_mode\": None, # optional: None, \"passthrough\", \"grouped\"",
"attribute `command_behavior` For example, a plugin named 'foo' and this is the `foo.py`:",
"`setup_parser()` function * MUST provide a `command()` function * MUST provide a `register_plugin()`",
"import Command command_behavior = { \"hidden\": False, # optional: bool \"arg_mode\": None, #",
"self.type_settings.get(self.name()) @classmethod def name(cls): \"\"\"Return the name of the Command and rez-subcommand.\"\"\" raise",
"docstring for command help, this is required. ''' from rez.command import Command command_behavior",
"MUST provide a `setup_parser()` function * MUST provide a `command()` function * MUST",
"SPDX-License-Identifier: Apache-2.0 # Copyright Contributors to the Rez Project from rez.config import config",
"\"passthrough\", \"grouped\" } def setup_parser(parser, completions=False): parser.add_argument(\"--hello\", ...) def command(opts, parser=None, extra_arg_groups=None): if",
"a `setup_parser()` function * MUST provide a `command()` function * MUST provide a"
] |
[
"options.subject: columns.append(\"subject_id\") headers.append(\"Subject Id\") if options.classification: columns.append(\"classification_id\") headers.append(\"Classification Id\") if options.source: columns.append(\"cluster_id\") headers.append(\"Source",
"columns.append(\"classification_id\") headers.append(\"Classification Id\") if options.source: columns.append(\"cluster_id\") headers.append(\"Source Id\") if len(columns) == 0: raise",
"(c) 2021 # # See the LICENSE file for details # see the",
"headers.append(\"User Id\") elif options.anon_user: columns.append(\"user_ip\") headers.append(\"User IP\") where = \"WHERE user_id IS NULL\"",
"return sql, headers # ======== # COMMANDS # ======== def view(connection, options): sql,",
"# ---------------------------------------------------------------------- # Copyright (c) 2021 # # See the LICENSE file for",
"-*- # ---------------------------------------------------------------------- # Copyright (c) 2021 # # See the LICENSE file",
"= list() where = \"\" if options.workflow: columns.append(\"workflow_id\") headers.append(\"Workflow Id\") if options.user: columns.append(\"user_id\")",
"def view(connection, options): sql, headers = dynamic_sql(options) cursor = connection.cursor() cursor.execute(sql) paging( iterable",
"the LICENSE file for details # see the AUTHORS file for authors #",
"file for details # see the AUTHORS file for authors # ---------------------------------------------------------------------- #--------------------",
"see the AUTHORS file for authors # ---------------------------------------------------------------------- #-------------------- # System wide imports",
"os.path import logging #-------------- # local imports # ------------- from streetool.utils import get_image,",
"be specified\") headers.append(\"# Classif\") sql = f\"SELECT {','.join(columns)}, COUNT(*) FROM spectra_classification_v {where} GROUP",
"headers.append(\"# Classif\") sql = f\"SELECT {','.join(columns)}, COUNT(*) FROM spectra_classification_v {where} GROUP BY {','.join(columns)}",
"Classif\") sql = f\"SELECT {','.join(columns)}, COUNT(*) FROM spectra_classification_v {where} GROUP BY {','.join(columns)} ORDER",
"authors # ---------------------------------------------------------------------- #-------------------- # System wide imports # ------------------- import os import",
"headers = list() where = \"\" if options.workflow: columns.append(\"workflow_id\") headers.append(\"Workflow Id\") if options.user:",
"columns.append(\"subject_id\") headers.append(\"Subject Id\") if options.classification: columns.append(\"classification_id\") headers.append(\"Classification Id\") if options.source: columns.append(\"cluster_id\") headers.append(\"Source Id\")",
"= dynamic_sql(options) cursor = connection.cursor() cursor.execute(sql) paging( iterable = cursor, headers = headers,",
"list() where = \"\" if options.workflow: columns.append(\"workflow_id\") headers.append(\"Workflow Id\") if options.user: columns.append(\"user_id\") headers.append(\"User",
"get_image, paging # ----------------------- # Module global variables # ----------------------- log = logging.getLogger(\"streetoool\")",
"constants # ---------------- def dynamic_sql(options): columns = list() headers = list() where =",
"coding: utf-8 -*- # ---------------------------------------------------------------------- # Copyright (c) 2021 # # See the",
"options.user: columns.append(\"user_id\") headers.append(\"User Id\") elif options.anon_user: columns.append(\"user_ip\") headers.append(\"User IP\") where = \"WHERE user_id",
"{','.join(columns)} ORDER BY {','.join(columns)}\" return sql, headers # ======== # COMMANDS # ========",
"System wide imports # ------------------- import os import os.path import logging #-------------- #",
"------------- from streetool.utils import get_image, paging # ----------------------- # Module global variables #",
"0: raise ValueError(\"At least one --<flag> must be specified\") headers.append(\"# Classif\") sql =",
"logging.getLogger(\"streetoool\") # ---------------- # Module constants # ---------------- def dynamic_sql(options): columns = list()",
"len(columns) == 0: raise ValueError(\"At least one --<flag> must be specified\") headers.append(\"# Classif\")",
"GROUP BY {','.join(columns)} ORDER BY {','.join(columns)}\" return sql, headers # ======== # COMMANDS",
"---------------- def dynamic_sql(options): columns = list() headers = list() where = \"\" if",
"{','.join(columns)}\" return sql, headers # ======== # COMMANDS # ======== def view(connection, options):",
"-*- coding: utf-8 -*- # ---------------------------------------------------------------------- # Copyright (c) 2021 # # See",
"dynamic_sql(options): columns = list() headers = list() where = \"\" if options.workflow: columns.append(\"workflow_id\")",
"if options.classification: columns.append(\"classification_id\") headers.append(\"Classification Id\") if options.source: columns.append(\"cluster_id\") headers.append(\"Source Id\") if len(columns) ==",
"Id\") if options.source: columns.append(\"cluster_id\") headers.append(\"Source Id\") if len(columns) == 0: raise ValueError(\"At least",
"# Module constants # ---------------- def dynamic_sql(options): columns = list() headers = list()",
"= \"WHERE user_id IS NULL\" if options.subject: columns.append(\"subject_id\") headers.append(\"Subject Id\") if options.classification: columns.append(\"classification_id\")",
"See the LICENSE file for details # see the AUTHORS file for authors",
"# # See the LICENSE file for details # see the AUTHORS file",
"# see the AUTHORS file for authors # ---------------------------------------------------------------------- #-------------------- # System wide",
"# ------------- from streetool.utils import get_image, paging # ----------------------- # Module global variables",
"if options.user: columns.append(\"user_id\") headers.append(\"User Id\") elif options.anon_user: columns.append(\"user_ip\") headers.append(\"User IP\") where = \"WHERE",
"headers.append(\"Workflow Id\") if options.user: columns.append(\"user_id\") headers.append(\"User Id\") elif options.anon_user: columns.append(\"user_ip\") headers.append(\"User IP\") where",
"view(connection, options): sql, headers = dynamic_sql(options) cursor = connection.cursor() cursor.execute(sql) paging( iterable =",
"Id\") elif options.anon_user: columns.append(\"user_ip\") headers.append(\"User IP\") where = \"WHERE user_id IS NULL\" if",
"columns.append(\"cluster_id\") headers.append(\"Source Id\") if len(columns) == 0: raise ValueError(\"At least one --<flag> must",
"log = logging.getLogger(\"streetoool\") # ---------------- # Module constants # ---------------- def dynamic_sql(options): columns",
"utf-8 -*- # ---------------------------------------------------------------------- # Copyright (c) 2021 # # See the LICENSE",
"if len(columns) == 0: raise ValueError(\"At least one --<flag> must be specified\") headers.append(\"#",
"# COMMANDS # ======== def view(connection, options): sql, headers = dynamic_sql(options) cursor =",
"user_id IS NULL\" if options.subject: columns.append(\"subject_id\") headers.append(\"Subject Id\") if options.classification: columns.append(\"classification_id\") headers.append(\"Classification Id\")",
"where = \"WHERE user_id IS NULL\" if options.subject: columns.append(\"subject_id\") headers.append(\"Subject Id\") if options.classification:",
"import get_image, paging # ----------------------- # Module global variables # ----------------------- log =",
"local imports # ------------- from streetool.utils import get_image, paging # ----------------------- # Module",
"specified\") headers.append(\"# Classif\") sql = f\"SELECT {','.join(columns)}, COUNT(*) FROM spectra_classification_v {where} GROUP BY",
"# ----------------------- log = logging.getLogger(\"streetoool\") # ---------------- # Module constants # ---------------- def",
"ValueError(\"At least one --<flag> must be specified\") headers.append(\"# Classif\") sql = f\"SELECT {','.join(columns)},",
"one --<flag> must be specified\") headers.append(\"# Classif\") sql = f\"SELECT {','.join(columns)}, COUNT(*) FROM",
"# ======== def view(connection, options): sql, headers = dynamic_sql(options) cursor = connection.cursor() cursor.execute(sql)",
"headers.append(\"Source Id\") if len(columns) == 0: raise ValueError(\"At least one --<flag> must be",
"# -*- coding: utf-8 -*- # ---------------------------------------------------------------------- # Copyright (c) 2021 # #",
"sql = f\"SELECT {','.join(columns)}, COUNT(*) FROM spectra_classification_v {where} GROUP BY {','.join(columns)} ORDER BY",
"# ---------------------------------------------------------------------- #-------------------- # System wide imports # ------------------- import os import os.path",
"# ---------------- def dynamic_sql(options): columns = list() headers = list() where = \"\"",
"\"WHERE user_id IS NULL\" if options.subject: columns.append(\"subject_id\") headers.append(\"Subject Id\") if options.classification: columns.append(\"classification_id\") headers.append(\"Classification",
"where = \"\" if options.workflow: columns.append(\"workflow_id\") headers.append(\"Workflow Id\") if options.user: columns.append(\"user_id\") headers.append(\"User Id\")",
"IP\") where = \"WHERE user_id IS NULL\" if options.subject: columns.append(\"subject_id\") headers.append(\"Subject Id\") if",
"headers # ======== # COMMANDS # ======== def view(connection, options): sql, headers =",
"options.classification: columns.append(\"classification_id\") headers.append(\"Classification Id\") if options.source: columns.append(\"cluster_id\") headers.append(\"Source Id\") if len(columns) == 0:",
"== 0: raise ValueError(\"At least one --<flag> must be specified\") headers.append(\"# Classif\") sql",
"# Module global variables # ----------------------- log = logging.getLogger(\"streetoool\") # ---------------- # Module",
"headers.append(\"User IP\") where = \"WHERE user_id IS NULL\" if options.subject: columns.append(\"subject_id\") headers.append(\"Subject Id\")",
"----------------------- log = logging.getLogger(\"streetoool\") # ---------------- # Module constants # ---------------- def dynamic_sql(options):",
"AUTHORS file for authors # ---------------------------------------------------------------------- #-------------------- # System wide imports # -------------------",
"# Copyright (c) 2021 # # See the LICENSE file for details #",
"list() headers = list() where = \"\" if options.workflow: columns.append(\"workflow_id\") headers.append(\"Workflow Id\") if",
"# ======== # COMMANDS # ======== def view(connection, options): sql, headers = dynamic_sql(options)",
"wide imports # ------------------- import os import os.path import logging #-------------- # local",
"{','.join(columns)}, COUNT(*) FROM spectra_classification_v {where} GROUP BY {','.join(columns)} ORDER BY {','.join(columns)}\" return sql,",
"IS NULL\" if options.subject: columns.append(\"subject_id\") headers.append(\"Subject Id\") if options.classification: columns.append(\"classification_id\") headers.append(\"Classification Id\") if",
"Module global variables # ----------------------- log = logging.getLogger(\"streetoool\") # ---------------- # Module constants",
"COUNT(*) FROM spectra_classification_v {where} GROUP BY {','.join(columns)} ORDER BY {','.join(columns)}\" return sql, headers",
"# local imports # ------------- from streetool.utils import get_image, paging # ----------------------- #",
"BY {','.join(columns)} ORDER BY {','.join(columns)}\" return sql, headers # ======== # COMMANDS #",
"ORDER BY {','.join(columns)}\" return sql, headers # ======== # COMMANDS # ======== def",
"======== # COMMANDS # ======== def view(connection, options): sql, headers = dynamic_sql(options) cursor",
"# See the LICENSE file for details # see the AUTHORS file for",
"columns = list() headers = list() where = \"\" if options.workflow: columns.append(\"workflow_id\") headers.append(\"Workflow",
"global variables # ----------------------- log = logging.getLogger(\"streetoool\") # ---------------- # Module constants #",
"must be specified\") headers.append(\"# Classif\") sql = f\"SELECT {','.join(columns)}, COUNT(*) FROM spectra_classification_v {where}",
"COMMANDS # ======== def view(connection, options): sql, headers = dynamic_sql(options) cursor = connection.cursor()",
"----------------------- # Module global variables # ----------------------- log = logging.getLogger(\"streetoool\") # ---------------- #",
"if options.subject: columns.append(\"subject_id\") headers.append(\"Subject Id\") if options.classification: columns.append(\"classification_id\") headers.append(\"Classification Id\") if options.source: columns.append(\"cluster_id\")",
"= f\"SELECT {','.join(columns)}, COUNT(*) FROM spectra_classification_v {where} GROUP BY {','.join(columns)} ORDER BY {','.join(columns)}\"",
"options.anon_user: columns.append(\"user_ip\") headers.append(\"User IP\") where = \"WHERE user_id IS NULL\" if options.subject: columns.append(\"subject_id\")",
"for authors # ---------------------------------------------------------------------- #-------------------- # System wide imports # ------------------- import os",
"paging # ----------------------- # Module global variables # ----------------------- log = logging.getLogger(\"streetoool\") #",
"--<flag> must be specified\") headers.append(\"# Classif\") sql = f\"SELECT {','.join(columns)}, COUNT(*) FROM spectra_classification_v",
"---------------------------------------------------------------------- #-------------------- # System wide imports # ------------------- import os import os.path import",
"options.workflow: columns.append(\"workflow_id\") headers.append(\"Workflow Id\") if options.user: columns.append(\"user_id\") headers.append(\"User Id\") elif options.anon_user: columns.append(\"user_ip\") headers.append(\"User",
"file for authors # ---------------------------------------------------------------------- #-------------------- # System wide imports # ------------------- import",
"import os import os.path import logging #-------------- # local imports # ------------- from",
"details # see the AUTHORS file for authors # ---------------------------------------------------------------------- #-------------------- # System",
"{where} GROUP BY {','.join(columns)} ORDER BY {','.join(columns)}\" return sql, headers # ======== #",
"imports # ------------------- import os import os.path import logging #-------------- # local imports",
"columns.append(\"workflow_id\") headers.append(\"Workflow Id\") if options.user: columns.append(\"user_id\") headers.append(\"User Id\") elif options.anon_user: columns.append(\"user_ip\") headers.append(\"User IP\")",
"os import os.path import logging #-------------- # local imports # ------------- from streetool.utils",
"sql, headers = dynamic_sql(options) cursor = connection.cursor() cursor.execute(sql) paging( iterable = cursor, headers",
"\"\" if options.workflow: columns.append(\"workflow_id\") headers.append(\"Workflow Id\") if options.user: columns.append(\"user_id\") headers.append(\"User Id\") elif options.anon_user:",
"Module constants # ---------------- def dynamic_sql(options): columns = list() headers = list() where",
"======== def view(connection, options): sql, headers = dynamic_sql(options) cursor = connection.cursor() cursor.execute(sql) paging(",
"LICENSE file for details # see the AUTHORS file for authors # ----------------------------------------------------------------------",
"options): sql, headers = dynamic_sql(options) cursor = connection.cursor() cursor.execute(sql) paging( iterable = cursor,",
"FROM spectra_classification_v {where} GROUP BY {','.join(columns)} ORDER BY {','.join(columns)}\" return sql, headers #",
"logging #-------------- # local imports # ------------- from streetool.utils import get_image, paging #",
"columns.append(\"user_id\") headers.append(\"User Id\") elif options.anon_user: columns.append(\"user_ip\") headers.append(\"User IP\") where = \"WHERE user_id IS",
"least one --<flag> must be specified\") headers.append(\"# Classif\") sql = f\"SELECT {','.join(columns)}, COUNT(*)",
"= \"\" if options.workflow: columns.append(\"workflow_id\") headers.append(\"Workflow Id\") if options.user: columns.append(\"user_id\") headers.append(\"User Id\") elif",
"headers.append(\"Subject Id\") if options.classification: columns.append(\"classification_id\") headers.append(\"Classification Id\") if options.source: columns.append(\"cluster_id\") headers.append(\"Source Id\") if",
"sql, headers # ======== # COMMANDS # ======== def view(connection, options): sql, headers",
"Id\") if options.user: columns.append(\"user_id\") headers.append(\"User Id\") elif options.anon_user: columns.append(\"user_ip\") headers.append(\"User IP\") where =",
"f\"SELECT {','.join(columns)}, COUNT(*) FROM spectra_classification_v {where} GROUP BY {','.join(columns)} ORDER BY {','.join(columns)}\" return",
"NULL\" if options.subject: columns.append(\"subject_id\") headers.append(\"Subject Id\") if options.classification: columns.append(\"classification_id\") headers.append(\"Classification Id\") if options.source:",
"dynamic_sql(options) cursor = connection.cursor() cursor.execute(sql) paging( iterable = cursor, headers = headers, )",
"the AUTHORS file for authors # ---------------------------------------------------------------------- #-------------------- # System wide imports #",
"streetool.utils import get_image, paging # ----------------------- # Module global variables # ----------------------- log",
"= logging.getLogger(\"streetoool\") # ---------------- # Module constants # ---------------- def dynamic_sql(options): columns =",
"# ---------------- # Module constants # ---------------- def dynamic_sql(options): columns = list() headers",
"#-------------------- # System wide imports # ------------------- import os import os.path import logging",
"if options.workflow: columns.append(\"workflow_id\") headers.append(\"Workflow Id\") if options.user: columns.append(\"user_id\") headers.append(\"User Id\") elif options.anon_user: columns.append(\"user_ip\")",
"2021 # # See the LICENSE file for details # see the AUTHORS",
"# ------------------- import os import os.path import logging #-------------- # local imports #",
"BY {','.join(columns)}\" return sql, headers # ======== # COMMANDS # ======== def view(connection,",
"#-------------- # local imports # ------------- from streetool.utils import get_image, paging # -----------------------",
"elif options.anon_user: columns.append(\"user_ip\") headers.append(\"User IP\") where = \"WHERE user_id IS NULL\" if options.subject:",
"Id\") if len(columns) == 0: raise ValueError(\"At least one --<flag> must be specified\")",
"= list() headers = list() where = \"\" if options.workflow: columns.append(\"workflow_id\") headers.append(\"Workflow Id\")",
"def dynamic_sql(options): columns = list() headers = list() where = \"\" if options.workflow:",
"headers.append(\"Classification Id\") if options.source: columns.append(\"cluster_id\") headers.append(\"Source Id\") if len(columns) == 0: raise ValueError(\"At",
"for details # see the AUTHORS file for authors # ---------------------------------------------------------------------- #-------------------- #",
"# System wide imports # ------------------- import os import os.path import logging #--------------",
"---------------- # Module constants # ---------------- def dynamic_sql(options): columns = list() headers =",
"import os.path import logging #-------------- # local imports # ------------- from streetool.utils import",
"columns.append(\"user_ip\") headers.append(\"User IP\") where = \"WHERE user_id IS NULL\" if options.subject: columns.append(\"subject_id\") headers.append(\"Subject",
"spectra_classification_v {where} GROUP BY {','.join(columns)} ORDER BY {','.join(columns)}\" return sql, headers # ========",
"variables # ----------------------- log = logging.getLogger(\"streetoool\") # ---------------- # Module constants # ----------------",
"options.source: columns.append(\"cluster_id\") headers.append(\"Source Id\") if len(columns) == 0: raise ValueError(\"At least one --<flag>",
"headers = dynamic_sql(options) cursor = connection.cursor() cursor.execute(sql) paging( iterable = cursor, headers =",
"Copyright (c) 2021 # # See the LICENSE file for details # see",
"raise ValueError(\"At least one --<flag> must be specified\") headers.append(\"# Classif\") sql = f\"SELECT",
"from streetool.utils import get_image, paging # ----------------------- # Module global variables # -----------------------",
"# ----------------------- # Module global variables # ----------------------- log = logging.getLogger(\"streetoool\") # ----------------",
"if options.source: columns.append(\"cluster_id\") headers.append(\"Source Id\") if len(columns) == 0: raise ValueError(\"At least one",
"------------------- import os import os.path import logging #-------------- # local imports # -------------",
"imports # ------------- from streetool.utils import get_image, paging # ----------------------- # Module global",
"Id\") if options.classification: columns.append(\"classification_id\") headers.append(\"Classification Id\") if options.source: columns.append(\"cluster_id\") headers.append(\"Source Id\") if len(columns)",
"---------------------------------------------------------------------- # Copyright (c) 2021 # # See the LICENSE file for details",
"import logging #-------------- # local imports # ------------- from streetool.utils import get_image, paging"
] |
[
"\"precision\": precision, \"f1\": f1, \"ambig_binary_crossentropy\": ambig_binary_crossentropy, \"ambig_mean_absolute_error\": ambig_mean_absolute_error, \"ambig_mean_squared_error\": ambig_mean_squared_error, \"MultichannelMultinomialNLL\": MultichannelMultinomialNLL, }",
":, i] = np.sum( hypothetical_contribs, axis=-1 ) to_return.append(np.mean(projected_hypothetical_contribs, axis=0)) to_return.append(np.zeros_like(orig_inp[1])) return to_return def",
"hypothetical_input[None, :, :] - bg_data[l] ) hypothetical_contribs = hypothetical_difference_from_reference * mult[l] projected_hypothetical_contribs[:, :,",
"bg_data): to_return = [] for l in [0]: projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype(\"float\") assert len(orig_inp[l].shape)",
"in the peaks peaks=pd.read_csv(args.peak_file,header=None,sep='\\t') nrow=peaks.shape[0] tosample=round(int(args.npeaks_to_sample)/nrow,2) peaks = peaks.sample(frac=tosample).reset_index(drop=True) nrow=peaks.shape[0] print(\"sampled peaks:\"+str(nrow)) #allocate",
"inputs start_index=0 while start_index < nrow: cur_batch_size=min(args.batch_size,nrow-start_index) print(str(start_index)+\":\"+str(start_index+cur_batch_size)) batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist() batch_start_pos=peaks[1]+peaks[9]-args.flank_size batch_start_pos=batch_start_pos.tolist() batch_start_pos=[max(0,i) for",
"\"fpr\": fpr, \"fnr\": fnr, \"precision\": precision, \"f1\": f1, \"ambig_binary_crossentropy\": ambig_binary_crossentropy, \"ambig_mean_absolute_error\": ambig_mean_absolute_error, \"ambig_mean_squared_error\":",
"line in chrom_sizes: tokens=line.split('\\t') chrom_size_dict[tokens[0]]=int(tokens[1]) ref=pysam.FastaFile(args.ref_fasta) # load the model model = load_model_wrapper(args.model_hdf5)",
"precision, \"f1\": f1, \"ambig_binary_crossentropy\": ambig_binary_crossentropy, \"ambig_mean_absolute_error\": ambig_mean_absolute_error, \"ambig_mean_squared_error\": ambig_mean_squared_error, \"MultichannelMultinomialNLL\": MultichannelMultinomialNLL, } get_custom_objects().update(custom_objects)",
"if args.dinuc_shuffle_input is True: seq_batch=[dinuc_shuffle(i) for i in seq_batch] seq_batch=one_hot_encode(seq_batch) seq[start_index:start_index+cur_batch_size,:,:]=seq_batch #get the",
"in range(numshuffles)]), ] def main(): args = parse_args() chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n') chrom_size_dict={} for line in",
"hypothetical_contribs, axis=-1 ) to_return.append(np.mean(projected_hypothetical_contribs, axis=0)) to_return.append(np.zeros_like(orig_inp[1])) return to_return def shuffle_several_times(s): numshuffles = 20",
"20 return [ np.array([dinuc_shuffle(s[0]) for i in range(numshuffles)]), np.array([s[1] for i in range(numshuffles)]),",
"import pysam import shap import tensorflow from deeplift.dinuc_shuffle import dinuc_shuffle from scipy.spatial.distance import",
"import pandas as pd from kerasAC.interpret.deepshap import * from kerasAC.interpret.profile_shap import * from",
"i in range(cur_batch_size)] seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i in range(cur_batch_size)] if args.dinuc_shuffle_input is True: seq_batch=[dinuc_shuffle(i)",
"i in seq_batch] seq_batch=one_hot_encode(seq_batch) seq[start_index:start_index+cur_batch_size,:,:]=seq_batch #get the hypothetical scores for the batch hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]=",
"#get the hypothetical scores for the batch hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None) observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0])",
"load_model_wrapper(args.model_hdf5) print(\"loaded model\") # create the count & profile explainers model_wrapper = (model.input,",
"numpy arrays for modisco hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4)) hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4)) observed_profile_scores=np.empty((nrow,2*args.flank_size,4)) observed_count_scores=np.empty((nrow,2*args.flank_size,4)) seq=np.empty((nrow,2*args.flank_size,4)) print(\"pre-allocted output arrays\") #generate",
"2 for i in range(orig_inp[l].shape[-1]): hypothetical_input = np.zeros_like(orig_inp[l]).astype(\"float\") hypothetical_input[:, i] = 1.0 hypothetical_difference_from_reference",
"from kerasAC.interpret.deepshap import * from kerasAC.interpret.profile_shap import * from kerasAC.util import * from",
"import math import pysam import shap import tensorflow from deeplift.dinuc_shuffle import dinuc_shuffle from",
"for i in range(numshuffles)]), np.array([s[1] for i in range(numshuffles)]), ] def main(): args",
"args = parse_args() chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n') chrom_size_dict={} for line in chrom_sizes: tokens=line.split('\\t') chrom_size_dict[tokens[0]]=int(tokens[1]) ref=pysam.FastaFile(args.ref_fasta) #",
"deeplift.dinuc_shuffle import dinuc_shuffle from scipy.spatial.distance import jensenshannon from scipy.special import logit, softmax tensorflow.compat.v1.disable_v2_behavior()",
"parser.add_argument(\"--flank_size\", type=int, default=1057) parser.add_argument(\"--batch_size\",type=int,default=100) return parser.parse_args() def load_model_wrapper(model_hdf5): # load the model! custom_objects",
"np.squeeze(count_explainer.shap_values(seq_batch)[0]) observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch start_index+=args.batch_size #save print(\"saving outputs\") np.save(args.out_prefix+'.hyp.profile.npy',hypothetical_profile_scores) np.save(args.out_prefix+'.observed.profile.npy',observed_profile_scores) np.save(args.out_prefix+'.hyp.count.npy',hypothetical_count_scores) np.save(args.out_prefix+'.observed.count.npy',observed_count_scores) np.save(args.out_prefix+'.seq.npy',seq) if __name__",
"kerasAC.util import * from tensorflow.keras.models import load_model from tensorflow.keras.utils import get_custom_objects from kerasAC.custom_losses",
"len(orig_inp[l].shape) == 2 for i in range(orig_inp[l].shape[-1]): hypothetical_input = np.zeros_like(orig_inp[l]).astype(\"float\") hypothetical_input[:, i] =",
"np.array([dinuc_shuffle(s[0]) for i in range(numshuffles)]), np.array([s[1] for i in range(numshuffles)]), ] def main():",
"scores for the batch hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None) observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0]) observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch start_index+=args.batch_size #save",
"[] for l in [0]: projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype(\"float\") assert len(orig_inp[l].shape) == 2 for",
"from scipy.spatial.distance import jensenshannon from scipy.special import logit, softmax tensorflow.compat.v1.disable_v2_behavior() import kerasAC import",
"projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype(\"float\") assert len(orig_inp[l].shape) == 2 for i in range(orig_inp[l].shape[-1]): hypothetical_input =",
"def load_model_wrapper(model_hdf5): # load the model! custom_objects = { \"recall\": recall, \"sensitivity\": recall,",
"tensorflow from deeplift.dinuc_shuffle import dinuc_shuffle from scipy.spatial.distance import jensenshannon from scipy.special import logit,",
"scoring\") parser.add_argument(\"--model_hdf5\") parser.add_argument(\"--peak_file\") parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000) parser.add_argument(\"--out_prefix\") parser.add_argument( \"--ref_fasta\", default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\" ) parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False) parser.add_argument(\"--chrom_sizes\", default=\"/data/hg38.chrom.sizes\") parser.add_argument(\"--flank_size\",",
") hypothetical_contribs = hypothetical_difference_from_reference * mult[l] projected_hypothetical_contribs[:, :, i] = np.sum( hypothetical_contribs, axis=-1",
"arrays\") #generate one-hot-encoded inputs start_index=0 while start_index < nrow: cur_batch_size=min(args.batch_size,nrow-start_index) print(str(start_index)+\":\"+str(start_index+cur_batch_size)) batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist() batch_start_pos=peaks[1]+peaks[9]-args.flank_size",
"kerasAC.custom_losses import * from kerasAC.metrics import * def parse_args(): parser = argparse.ArgumentParser(description=\"Argument Parser",
"main(): args = parse_args() chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n') chrom_size_dict={} for line in chrom_sizes: tokens=line.split('\\t') chrom_size_dict[tokens[0]]=int(tokens[1]) ref=pysam.FastaFile(args.ref_fasta)",
"explainers\") #read in the peaks peaks=pd.read_csv(args.peak_file,header=None,sep='\\t') nrow=peaks.shape[0] tosample=round(int(args.npeaks_to_sample)/nrow,2) peaks = peaks.sample(frac=tosample).reset_index(drop=True) nrow=peaks.shape[0] print(\"sampled",
"mult[l] projected_hypothetical_contribs[:, :, i] = np.sum( hypothetical_contribs, axis=-1 ) to_return.append(np.mean(projected_hypothetical_contribs, axis=0)) to_return.append(np.zeros_like(orig_inp[1])) return",
"hypothetical scores for the batch hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None) observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0]) observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch start_index+=args.batch_size",
"pd from kerasAC.interpret.deepshap import * from kerasAC.interpret.profile_shap import * from kerasAC.util import *",
"* def parse_args(): parser = argparse.ArgumentParser(description=\"Argument Parser for SNP scoring\") parser.add_argument(\"--model_hdf5\") parser.add_argument(\"--peak_file\") parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000)",
"import * from kerasAC.interpret.profile_shap import * from kerasAC.util import * from tensorflow.keras.models import",
"start_index < nrow: cur_batch_size=min(args.batch_size,nrow-start_index) print(str(start_index)+\":\"+str(start_index+cur_batch_size)) batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist() batch_start_pos=peaks[1]+peaks[9]-args.flank_size batch_start_pos=batch_start_pos.tolist() batch_start_pos=[max(0,i) for i in batch_start_pos]",
"default=1057) parser.add_argument(\"--batch_size\",type=int,default=100) return parser.parse_args() def load_model_wrapper(model_hdf5): # load the model! custom_objects = {",
"return load_model(model_hdf5) def combine_mult_and_diffref(mult, orig_inp, bg_data): to_return = [] for l in [0]:",
"get_custom_objects from kerasAC.custom_losses import * from kerasAC.metrics import * def parse_args(): parser =",
"print(\"loaded model\") # create the count & profile explainers model_wrapper = (model.input, model.outputs[1][:,",
"count & profile explainers model_wrapper = (model.input, model.outputs[1][:, 0:1]) count_explainer = shap.DeepExplainer( model_wrapper,",
"parser.add_argument( \"--ref_fasta\", default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\" ) parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False) parser.add_argument(\"--chrom_sizes\", default=\"/data/hg38.chrom.sizes\") parser.add_argument(\"--flank_size\", type=int, default=1057) parser.add_argument(\"--batch_size\",type=int,default=100) return parser.parse_args()",
"args.dinuc_shuffle_input is True: seq_batch=[dinuc_shuffle(i) for i in seq_batch] seq_batch=one_hot_encode(seq_batch) seq[start_index:start_index+cur_batch_size,:,:]=seq_batch #get the hypothetical",
"get_custom_objects().update(custom_objects) return load_model(model_hdf5) def combine_mult_and_diffref(mult, orig_inp, bg_data): to_return = [] for l in",
"for SNP scoring\") parser.add_argument(\"--model_hdf5\") parser.add_argument(\"--peak_file\") parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000) parser.add_argument(\"--out_prefix\") parser.add_argument( \"--ref_fasta\", default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\" ) parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False) parser.add_argument(\"--chrom_sizes\",",
"\"sensitivity\": recall, \"specificity\": specificity, \"fpr\": fpr, \"fnr\": fnr, \"precision\": precision, \"f1\": f1, \"ambig_binary_crossentropy\":",
"= np.zeros_like(orig_inp[l]).astype(\"float\") hypothetical_input[:, i] = 1.0 hypothetical_difference_from_reference = ( hypothetical_input[None, :, :] -",
"* from kerasAC.interpret.profile_shap import * from kerasAC.util import * from tensorflow.keras.models import load_model",
"from kerasAC.interpret.profile_shap import * from kerasAC.util import * from tensorflow.keras.models import load_model from",
"space for numpy arrays for modisco hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4)) hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4)) observed_profile_scores=np.empty((nrow,2*args.flank_size,4)) observed_count_scores=np.empty((nrow,2*args.flank_size,4)) seq=np.empty((nrow,2*args.flank_size,4)) print(\"pre-allocted output",
"default=\"/data/hg38.chrom.sizes\") parser.add_argument(\"--flank_size\", type=int, default=1057) parser.add_argument(\"--batch_size\",type=int,default=100) return parser.parse_args() def load_model_wrapper(model_hdf5): # load the model!",
"parser.add_argument(\"--out_prefix\") parser.add_argument( \"--ref_fasta\", default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\" ) parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False) parser.add_argument(\"--chrom_sizes\", default=\"/data/hg38.chrom.sizes\") parser.add_argument(\"--flank_size\", type=int, default=1057) parser.add_argument(\"--batch_size\",type=int,default=100) return",
"\"--ref_fasta\", default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\" ) parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False) parser.add_argument(\"--chrom_sizes\", default=\"/data/hg38.chrom.sizes\") parser.add_argument(\"--flank_size\", type=int, default=1057) parser.add_argument(\"--batch_size\",type=int,default=100) return parser.parse_args() def",
"\"specificity\": specificity, \"fpr\": fpr, \"fnr\": fnr, \"precision\": precision, \"f1\": f1, \"ambig_binary_crossentropy\": ambig_binary_crossentropy, \"ambig_mean_absolute_error\":",
"explainers model_wrapper = (model.input, model.outputs[1][:, 0:1]) count_explainer = shap.DeepExplainer( model_wrapper, data=create_background_atac, combine_mult_and_diffref=combine_mult_and_diffref_atac )",
"for l in [0]: projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype(\"float\") assert len(orig_inp[l].shape) == 2 for i",
"dinuc_shuffle from scipy.spatial.distance import jensenshannon from scipy.special import logit, softmax tensorflow.compat.v1.disable_v2_behavior() import kerasAC",
"for i in batch_start_pos] batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i in range(cur_batch_size)] seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i in",
"projected_hypothetical_contribs[:, :, i] = np.sum( hypothetical_contribs, axis=-1 ) to_return.append(np.mean(projected_hypothetical_contribs, axis=0)) to_return.append(np.zeros_like(orig_inp[1])) return to_return",
"from kerasAC.custom_losses import * from kerasAC.metrics import * def parse_args(): parser = argparse.ArgumentParser(description=\"Argument",
"from tensorflow.keras.utils import get_custom_objects from kerasAC.custom_losses import * from kerasAC.metrics import * def",
"i] = np.sum( hypothetical_contribs, axis=-1 ) to_return.append(np.mean(projected_hypothetical_contribs, axis=0)) to_return.append(np.zeros_like(orig_inp[1])) return to_return def shuffle_several_times(s):",
"def combine_mult_and_diffref(mult, orig_inp, bg_data): to_return = [] for l in [0]: projected_hypothetical_contribs =",
":, :] - bg_data[l] ) hypothetical_contribs = hypothetical_difference_from_reference * mult[l] projected_hypothetical_contribs[:, :, i]",
"in range(cur_batch_size)] if args.dinuc_shuffle_input is True: seq_batch=[dinuc_shuffle(i) for i in seq_batch] seq_batch=one_hot_encode(seq_batch) seq[start_index:start_index+cur_batch_size,:,:]=seq_batch",
"observed_count_scores=np.empty((nrow,2*args.flank_size,4)) seq=np.empty((nrow,2*args.flank_size,4)) print(\"pre-allocted output arrays\") #generate one-hot-encoded inputs start_index=0 while start_index < nrow:",
"hypothetical_difference_from_reference = ( hypothetical_input[None, :, :] - bg_data[l] ) hypothetical_contribs = hypothetical_difference_from_reference *",
"count_explainer = shap.DeepExplainer( model_wrapper, data=create_background_atac, combine_mult_and_diffref=combine_mult_and_diffref_atac ) prof_explainer = create_explainer(model, ischip=False, task_index=0) print(\"made",
"range(numshuffles)]), ] def main(): args = parse_args() chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n') chrom_size_dict={} for line in chrom_sizes:",
"= hypothetical_difference_from_reference * mult[l] projected_hypothetical_contribs[:, :, i] = np.sum( hypothetical_contribs, axis=-1 ) to_return.append(np.mean(projected_hypothetical_contribs,",
"batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i in range(cur_batch_size)] seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i in range(cur_batch_size)] if args.dinuc_shuffle_input is",
"np.array([s[1] for i in range(numshuffles)]), ] def main(): args = parse_args() chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n') chrom_size_dict={}",
"print(\"made explainers\") #read in the peaks peaks=pd.read_csv(args.peak_file,header=None,sep='\\t') nrow=peaks.shape[0] tosample=round(int(args.npeaks_to_sample)/nrow,2) peaks = peaks.sample(frac=tosample).reset_index(drop=True) nrow=peaks.shape[0]",
"for the batch hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None) observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0]) observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch start_index+=args.batch_size #save print(\"saving",
"chrom_size_dict={} for line in chrom_sizes: tokens=line.split('\\t') chrom_size_dict[tokens[0]]=int(tokens[1]) ref=pysam.FastaFile(args.ref_fasta) # load the model model",
"import argparse import math import pysam import shap import tensorflow from deeplift.dinuc_shuffle import",
"hypothetical_input[:, i] = 1.0 hypothetical_difference_from_reference = ( hypothetical_input[None, :, :] - bg_data[l] )",
"peaks = peaks.sample(frac=tosample).reset_index(drop=True) nrow=peaks.shape[0] print(\"sampled peaks:\"+str(nrow)) #allocate space for numpy arrays for modisco",
"prof_explainer(seq_batch, None) observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0]) observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch start_index+=args.batch_size #save print(\"saving outputs\") np.save(args.out_prefix+'.hyp.profile.npy',hypothetical_profile_scores) np.save(args.out_prefix+'.observed.profile.npy',observed_profile_scores) np.save(args.out_prefix+'.hyp.count.npy',hypothetical_count_scores)",
"as pd from kerasAC.interpret.deepshap import * from kerasAC.interpret.profile_shap import * from kerasAC.util import",
"ambig_mean_squared_error, \"MultichannelMultinomialNLL\": MultichannelMultinomialNLL, } get_custom_objects().update(custom_objects) return load_model(model_hdf5) def combine_mult_and_diffref(mult, orig_inp, bg_data): to_return =",
"} get_custom_objects().update(custom_objects) return load_model(model_hdf5) def combine_mult_and_diffref(mult, orig_inp, bg_data): to_return = [] for l",
"import get_custom_objects from kerasAC.custom_losses import * from kerasAC.metrics import * def parse_args(): parser",
"observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0]) observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch start_index+=args.batch_size #save print(\"saving outputs\") np.save(args.out_prefix+'.hyp.profile.npy',hypothetical_profile_scores) np.save(args.out_prefix+'.observed.profile.npy',observed_profile_scores) np.save(args.out_prefix+'.hyp.count.npy',hypothetical_count_scores) np.save(args.out_prefix+'.observed.count.npy',observed_count_scores) np.save(args.out_prefix+'.seq.npy',seq)",
"= (model.input, model.outputs[1][:, 0:1]) count_explainer = shap.DeepExplainer( model_wrapper, data=create_background_atac, combine_mult_and_diffref=combine_mult_and_diffref_atac ) prof_explainer =",
"in range(numshuffles)]), np.array([s[1] for i in range(numshuffles)]), ] def main(): args = parse_args()",
"for i in seq_batch] seq_batch=one_hot_encode(seq_batch) seq[start_index:start_index+cur_batch_size,:,:]=seq_batch #get the hypothetical scores for the batch",
"None) observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0]) observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch start_index+=args.batch_size #save print(\"saving outputs\") np.save(args.out_prefix+'.hyp.profile.npy',hypothetical_profile_scores) np.save(args.out_prefix+'.observed.profile.npy',observed_profile_scores) np.save(args.out_prefix+'.hyp.count.npy',hypothetical_count_scores) np.save(args.out_prefix+'.observed.count.npy',observed_count_scores)",
"nrow=peaks.shape[0] print(\"sampled peaks:\"+str(nrow)) #allocate space for numpy arrays for modisco hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4)) hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4)) observed_profile_scores=np.empty((nrow,2*args.flank_size,4))",
"shap import tensorflow from deeplift.dinuc_shuffle import dinuc_shuffle from scipy.spatial.distance import jensenshannon from scipy.special",
"def shuffle_several_times(s): numshuffles = 20 return [ np.array([dinuc_shuffle(s[0]) for i in range(numshuffles)]), np.array([s[1]",
"= ( hypothetical_input[None, :, :] - bg_data[l] ) hypothetical_contribs = hypothetical_difference_from_reference * mult[l]",
"model_wrapper = (model.input, model.outputs[1][:, 0:1]) count_explainer = shap.DeepExplainer( model_wrapper, data=create_background_atac, combine_mult_and_diffref=combine_mult_and_diffref_atac ) prof_explainer",
"model = load_model_wrapper(args.model_hdf5) print(\"loaded model\") # create the count & profile explainers model_wrapper",
"fnr, \"precision\": precision, \"f1\": f1, \"ambig_binary_crossentropy\": ambig_binary_crossentropy, \"ambig_mean_absolute_error\": ambig_mean_absolute_error, \"ambig_mean_squared_error\": ambig_mean_squared_error, \"MultichannelMultinomialNLL\": MultichannelMultinomialNLL,",
"nrow: cur_batch_size=min(args.batch_size,nrow-start_index) print(str(start_index)+\":\"+str(start_index+cur_batch_size)) batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist() batch_start_pos=peaks[1]+peaks[9]-args.flank_size batch_start_pos=batch_start_pos.tolist() batch_start_pos=[max(0,i) for i in batch_start_pos] batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for",
"hypothetical_contribs = hypothetical_difference_from_reference * mult[l] projected_hypothetical_contribs[:, :, i] = np.sum( hypothetical_contribs, axis=-1 )",
"print(str(start_index)+\":\"+str(start_index+cur_batch_size)) batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist() batch_start_pos=peaks[1]+peaks[9]-args.flank_size batch_start_pos=batch_start_pos.tolist() batch_start_pos=[max(0,i) for i in batch_start_pos] batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i in",
":] - bg_data[l] ) hypothetical_contribs = hypothetical_difference_from_reference * mult[l] projected_hypothetical_contribs[:, :, i] =",
"tensorflow.keras.utils import get_custom_objects from kerasAC.custom_losses import * from kerasAC.metrics import * def parse_args():",
"hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4)) observed_profile_scores=np.empty((nrow,2*args.flank_size,4)) observed_count_scores=np.empty((nrow,2*args.flank_size,4)) seq=np.empty((nrow,2*args.flank_size,4)) print(\"pre-allocted output arrays\") #generate one-hot-encoded inputs start_index=0 while start_index",
"MultichannelMultinomialNLL, } get_custom_objects().update(custom_objects) return load_model(model_hdf5) def combine_mult_and_diffref(mult, orig_inp, bg_data): to_return = [] for",
"model.outputs[1][:, 0:1]) count_explainer = shap.DeepExplainer( model_wrapper, data=create_background_atac, combine_mult_and_diffref=combine_mult_and_diffref_atac ) prof_explainer = create_explainer(model, ischip=False,",
"for modisco hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4)) hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4)) observed_profile_scores=np.empty((nrow,2*args.flank_size,4)) observed_count_scores=np.empty((nrow,2*args.flank_size,4)) seq=np.empty((nrow,2*args.flank_size,4)) print(\"pre-allocted output arrays\") #generate one-hot-encoded inputs",
"batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist() batch_start_pos=peaks[1]+peaks[9]-args.flank_size batch_start_pos=batch_start_pos.tolist() batch_start_pos=[max(0,i) for i in batch_start_pos] batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i in range(cur_batch_size)]",
"return [ np.array([dinuc_shuffle(s[0]) for i in range(numshuffles)]), np.array([s[1] for i in range(numshuffles)]), ]",
"profile explainers model_wrapper = (model.input, model.outputs[1][:, 0:1]) count_explainer = shap.DeepExplainer( model_wrapper, data=create_background_atac, combine_mult_and_diffref=combine_mult_and_diffref_atac",
"parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000) parser.add_argument(\"--out_prefix\") parser.add_argument( \"--ref_fasta\", default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\" ) parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False) parser.add_argument(\"--chrom_sizes\", default=\"/data/hg38.chrom.sizes\") parser.add_argument(\"--flank_size\", type=int, default=1057) parser.add_argument(\"--batch_size\",type=int,default=100)",
"# load the model model = load_model_wrapper(args.model_hdf5) print(\"loaded model\") # create the count",
"from kerasAC.metrics import * def parse_args(): parser = argparse.ArgumentParser(description=\"Argument Parser for SNP scoring\")",
"{ \"recall\": recall, \"sensitivity\": recall, \"specificity\": specificity, \"fpr\": fpr, \"fnr\": fnr, \"precision\": precision,",
"- bg_data[l] ) hypothetical_contribs = hypothetical_difference_from_reference * mult[l] projected_hypothetical_contribs[:, :, i] = np.sum(",
"import * from tensorflow.keras.models import load_model from tensorflow.keras.utils import get_custom_objects from kerasAC.custom_losses import",
"shuffle_several_times(s): numshuffles = 20 return [ np.array([dinuc_shuffle(s[0]) for i in range(numshuffles)]), np.array([s[1] for",
"model! custom_objects = { \"recall\": recall, \"sensitivity\": recall, \"specificity\": specificity, \"fpr\": fpr, \"fnr\":",
"batch_start_pos=peaks[1]+peaks[9]-args.flank_size batch_start_pos=batch_start_pos.tolist() batch_start_pos=[max(0,i) for i in batch_start_pos] batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i in range(cur_batch_size)] seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size)",
"load the model model = load_model_wrapper(args.model_hdf5) print(\"loaded model\") # create the count &",
"chrom_size_dict[tokens[0]]=int(tokens[1]) ref=pysam.FastaFile(args.ref_fasta) # load the model model = load_model_wrapper(args.model_hdf5) print(\"loaded model\") # create",
"math import pysam import shap import tensorflow from deeplift.dinuc_shuffle import dinuc_shuffle from scipy.spatial.distance",
"batch hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None) observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0]) observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch start_index+=args.batch_size #save print(\"saving outputs\") np.save(args.out_prefix+'.hyp.profile.npy',hypothetical_profile_scores)",
"tokens=line.split('\\t') chrom_size_dict[tokens[0]]=int(tokens[1]) ref=pysam.FastaFile(args.ref_fasta) # load the model model = load_model_wrapper(args.model_hdf5) print(\"loaded model\") #",
"Parser for SNP scoring\") parser.add_argument(\"--model_hdf5\") parser.add_argument(\"--peak_file\") parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000) parser.add_argument(\"--out_prefix\") parser.add_argument( \"--ref_fasta\", default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\" ) parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False)",
"orig_inp, bg_data): to_return = [] for l in [0]: projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype(\"float\") assert",
"f1, \"ambig_binary_crossentropy\": ambig_binary_crossentropy, \"ambig_mean_absolute_error\": ambig_mean_absolute_error, \"ambig_mean_squared_error\": ambig_mean_squared_error, \"MultichannelMultinomialNLL\": MultichannelMultinomialNLL, } get_custom_objects().update(custom_objects) return load_model(model_hdf5)",
"prof_explainer = create_explainer(model, ischip=False, task_index=0) print(\"made explainers\") #read in the peaks peaks=pd.read_csv(args.peak_file,header=None,sep='\\t') nrow=peaks.shape[0]",
"ambig_binary_crossentropy, \"ambig_mean_absolute_error\": ambig_mean_absolute_error, \"ambig_mean_squared_error\": ambig_mean_squared_error, \"MultichannelMultinomialNLL\": MultichannelMultinomialNLL, } get_custom_objects().update(custom_objects) return load_model(model_hdf5) def combine_mult_and_diffref(mult,",
"\"fnr\": fnr, \"precision\": precision, \"f1\": f1, \"ambig_binary_crossentropy\": ambig_binary_crossentropy, \"ambig_mean_absolute_error\": ambig_mean_absolute_error, \"ambig_mean_squared_error\": ambig_mean_squared_error, \"MultichannelMultinomialNLL\":",
"import load_model from tensorflow.keras.utils import get_custom_objects from kerasAC.custom_losses import * from kerasAC.metrics import",
"type=int, default=1057) parser.add_argument(\"--batch_size\",type=int,default=100) return parser.parse_args() def load_model_wrapper(model_hdf5): # load the model! custom_objects =",
"parse_args(): parser = argparse.ArgumentParser(description=\"Argument Parser for SNP scoring\") parser.add_argument(\"--model_hdf5\") parser.add_argument(\"--peak_file\") parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000) parser.add_argument(\"--out_prefix\") parser.add_argument(",
"seq[start_index:start_index+cur_batch_size,:,:]=seq_batch #get the hypothetical scores for the batch hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None) observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]=",
"jensenshannon from scipy.special import logit, softmax tensorflow.compat.v1.disable_v2_behavior() import kerasAC import matplotlib import pandas",
"\"ambig_mean_squared_error\": ambig_mean_squared_error, \"MultichannelMultinomialNLL\": MultichannelMultinomialNLL, } get_custom_objects().update(custom_objects) return load_model(model_hdf5) def combine_mult_and_diffref(mult, orig_inp, bg_data): to_return",
"one-hot-encoded inputs start_index=0 while start_index < nrow: cur_batch_size=min(args.batch_size,nrow-start_index) print(str(start_index)+\":\"+str(start_index+cur_batch_size)) batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist() batch_start_pos=peaks[1]+peaks[9]-args.flank_size batch_start_pos=batch_start_pos.tolist() batch_start_pos=[max(0,i)",
"the peaks peaks=pd.read_csv(args.peak_file,header=None,sep='\\t') nrow=peaks.shape[0] tosample=round(int(args.npeaks_to_sample)/nrow,2) peaks = peaks.sample(frac=tosample).reset_index(drop=True) nrow=peaks.shape[0] print(\"sampled peaks:\"+str(nrow)) #allocate space",
"kerasAC import matplotlib import pandas as pd from kerasAC.interpret.deepshap import * from kerasAC.interpret.profile_shap",
"return to_return def shuffle_several_times(s): numshuffles = 20 return [ np.array([dinuc_shuffle(s[0]) for i in",
"seq_batch=one_hot_encode(seq_batch) seq[start_index:start_index+cur_batch_size,:,:]=seq_batch #get the hypothetical scores for the batch hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None) observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch",
"scipy.spatial.distance import jensenshannon from scipy.special import logit, softmax tensorflow.compat.v1.disable_v2_behavior() import kerasAC import matplotlib",
"combine_mult_and_diffref(mult, orig_inp, bg_data): to_return = [] for l in [0]: projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype(\"float\")",
"# load the model! custom_objects = { \"recall\": recall, \"sensitivity\": recall, \"specificity\": specificity,",
"True: seq_batch=[dinuc_shuffle(i) for i in seq_batch] seq_batch=one_hot_encode(seq_batch) seq[start_index:start_index+cur_batch_size,:,:]=seq_batch #get the hypothetical scores for",
"(model.input, model.outputs[1][:, 0:1]) count_explainer = shap.DeepExplainer( model_wrapper, data=create_background_atac, combine_mult_and_diffref=combine_mult_and_diffref_atac ) prof_explainer = create_explainer(model,",
"observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch start_index+=args.batch_size #save print(\"saving outputs\") np.save(args.out_prefix+'.hyp.profile.npy',hypothetical_profile_scores) np.save(args.out_prefix+'.observed.profile.npy',observed_profile_scores) np.save(args.out_prefix+'.hyp.count.npy',hypothetical_count_scores) np.save(args.out_prefix+'.observed.count.npy',observed_count_scores) np.save(args.out_prefix+'.seq.npy',seq) if __name__ ==",
"specificity, \"fpr\": fpr, \"fnr\": fnr, \"precision\": precision, \"f1\": f1, \"ambig_binary_crossentropy\": ambig_binary_crossentropy, \"ambig_mean_absolute_error\": ambig_mean_absolute_error,",
"for i in range(numshuffles)]), ] def main(): args = parse_args() chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n') chrom_size_dict={} for",
"range(orig_inp[l].shape[-1]): hypothetical_input = np.zeros_like(orig_inp[l]).astype(\"float\") hypothetical_input[:, i] = 1.0 hypothetical_difference_from_reference = ( hypothetical_input[None, :,",
"range(cur_batch_size)] if args.dinuc_shuffle_input is True: seq_batch=[dinuc_shuffle(i) for i in seq_batch] seq_batch=one_hot_encode(seq_batch) seq[start_index:start_index+cur_batch_size,:,:]=seq_batch #get",
"load_model from tensorflow.keras.utils import get_custom_objects from kerasAC.custom_losses import * from kerasAC.metrics import *",
"= load_model_wrapper(args.model_hdf5) print(\"loaded model\") # create the count & profile explainers model_wrapper =",
"cur_batch_size=min(args.batch_size,nrow-start_index) print(str(start_index)+\":\"+str(start_index+cur_batch_size)) batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist() batch_start_pos=peaks[1]+peaks[9]-args.flank_size batch_start_pos=batch_start_pos.tolist() batch_start_pos=[max(0,i) for i in batch_start_pos] batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i",
"= create_explainer(model, ischip=False, task_index=0) print(\"made explainers\") #read in the peaks peaks=pd.read_csv(args.peak_file,header=None,sep='\\t') nrow=peaks.shape[0] tosample=round(int(args.npeaks_to_sample)/nrow,2)",
"from scipy.special import logit, softmax tensorflow.compat.v1.disable_v2_behavior() import kerasAC import matplotlib import pandas as",
"print(\"pre-allocted output arrays\") #generate one-hot-encoded inputs start_index=0 while start_index < nrow: cur_batch_size=min(args.batch_size,nrow-start_index) print(str(start_index)+\":\"+str(start_index+cur_batch_size))",
"argparse.ArgumentParser(description=\"Argument Parser for SNP scoring\") parser.add_argument(\"--model_hdf5\") parser.add_argument(\"--peak_file\") parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000) parser.add_argument(\"--out_prefix\") parser.add_argument( \"--ref_fasta\", default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\" )",
"np.zeros_like(orig_inp[l]).astype(\"float\") hypothetical_input[:, i] = 1.0 hypothetical_difference_from_reference = ( hypothetical_input[None, :, :] - bg_data[l]",
"load_model_wrapper(model_hdf5): # load the model! custom_objects = { \"recall\": recall, \"sensitivity\": recall, \"specificity\":",
"ref=pysam.FastaFile(args.ref_fasta) # load the model model = load_model_wrapper(args.model_hdf5) print(\"loaded model\") # create the",
"import logit, softmax tensorflow.compat.v1.disable_v2_behavior() import kerasAC import matplotlib import pandas as pd from",
"for i in range(orig_inp[l].shape[-1]): hypothetical_input = np.zeros_like(orig_inp[l]).astype(\"float\") hypothetical_input[:, i] = 1.0 hypothetical_difference_from_reference =",
"kerasAC.interpret.deepshap import * from kerasAC.interpret.profile_shap import * from kerasAC.util import * from tensorflow.keras.models",
"\"recall\": recall, \"sensitivity\": recall, \"specificity\": specificity, \"fpr\": fpr, \"fnr\": fnr, \"precision\": precision, \"f1\":",
"in range(cur_batch_size)] seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i in range(cur_batch_size)] if args.dinuc_shuffle_input is True: seq_batch=[dinuc_shuffle(i) for",
") to_return.append(np.mean(projected_hypothetical_contribs, axis=0)) to_return.append(np.zeros_like(orig_inp[1])) return to_return def shuffle_several_times(s): numshuffles = 20 return [",
"* mult[l] projected_hypothetical_contribs[:, :, i] = np.sum( hypothetical_contribs, axis=-1 ) to_return.append(np.mean(projected_hypothetical_contribs, axis=0)) to_return.append(np.zeros_like(orig_inp[1]))",
"< nrow: cur_batch_size=min(args.batch_size,nrow-start_index) print(str(start_index)+\":\"+str(start_index+cur_batch_size)) batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist() batch_start_pos=peaks[1]+peaks[9]-args.flank_size batch_start_pos=batch_start_pos.tolist() batch_start_pos=[max(0,i) for i in batch_start_pos] batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size)",
"parse_args() chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n') chrom_size_dict={} for line in chrom_sizes: tokens=line.split('\\t') chrom_size_dict[tokens[0]]=int(tokens[1]) ref=pysam.FastaFile(args.ref_fasta) # load the",
"import tensorflow from deeplift.dinuc_shuffle import dinuc_shuffle from scipy.spatial.distance import jensenshannon from scipy.special import",
"parser.parse_args() def load_model_wrapper(model_hdf5): # load the model! custom_objects = { \"recall\": recall, \"sensitivity\":",
"batch_start_pos=batch_start_pos.tolist() batch_start_pos=[max(0,i) for i in batch_start_pos] batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i in range(cur_batch_size)] seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for",
"model_wrapper, data=create_background_atac, combine_mult_and_diffref=combine_mult_and_diffref_atac ) prof_explainer = create_explainer(model, ischip=False, task_index=0) print(\"made explainers\") #read in",
"for i in range(cur_batch_size)] seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i in range(cur_batch_size)] if args.dinuc_shuffle_input is True:",
"hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0]) observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch start_index+=args.batch_size #save print(\"saving outputs\") np.save(args.out_prefix+'.hyp.profile.npy',hypothetical_profile_scores) np.save(args.out_prefix+'.observed.profile.npy',observed_profile_scores) np.save(args.out_prefix+'.hyp.count.npy',hypothetical_count_scores) np.save(args.out_prefix+'.observed.count.npy',observed_count_scores) np.save(args.out_prefix+'.seq.npy',seq) if",
"the model model = load_model_wrapper(args.model_hdf5) print(\"loaded model\") # create the count & profile",
"* from tensorflow.keras.models import load_model from tensorflow.keras.utils import get_custom_objects from kerasAC.custom_losses import *",
"to_return = [] for l in [0]: projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype(\"float\") assert len(orig_inp[l].shape) ==",
"return parser.parse_args() def load_model_wrapper(model_hdf5): # load the model! custom_objects = { \"recall\": recall,",
"in seq_batch] seq_batch=one_hot_encode(seq_batch) seq[start_index:start_index+cur_batch_size,:,:]=seq_batch #get the hypothetical scores for the batch hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch,",
"softmax tensorflow.compat.v1.disable_v2_behavior() import kerasAC import matplotlib import pandas as pd from kerasAC.interpret.deepshap import",
"l in [0]: projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype(\"float\") assert len(orig_inp[l].shape) == 2 for i in",
"create_explainer(model, ischip=False, task_index=0) print(\"made explainers\") #read in the peaks peaks=pd.read_csv(args.peak_file,header=None,sep='\\t') nrow=peaks.shape[0] tosample=round(int(args.npeaks_to_sample)/nrow,2) peaks",
"i in batch_start_pos] batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i in range(cur_batch_size)] seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i in range(cur_batch_size)]",
"argparse import math import pysam import shap import tensorflow from deeplift.dinuc_shuffle import dinuc_shuffle",
"load the model! custom_objects = { \"recall\": recall, \"sensitivity\": recall, \"specificity\": specificity, \"fpr\":",
"seq=np.empty((nrow,2*args.flank_size,4)) print(\"pre-allocted output arrays\") #generate one-hot-encoded inputs start_index=0 while start_index < nrow: cur_batch_size=min(args.batch_size,nrow-start_index)",
"\"ambig_binary_crossentropy\": ambig_binary_crossentropy, \"ambig_mean_absolute_error\": ambig_mean_absolute_error, \"ambig_mean_squared_error\": ambig_mean_squared_error, \"MultichannelMultinomialNLL\": MultichannelMultinomialNLL, } get_custom_objects().update(custom_objects) return load_model(model_hdf5) def",
"np.zeros_like(bg_data[l]).astype(\"float\") assert len(orig_inp[l].shape) == 2 for i in range(orig_inp[l].shape[-1]): hypothetical_input = np.zeros_like(orig_inp[l]).astype(\"float\") hypothetical_input[:,",
"from tensorflow.keras.models import load_model from tensorflow.keras.utils import get_custom_objects from kerasAC.custom_losses import * from",
"custom_objects = { \"recall\": recall, \"sensitivity\": recall, \"specificity\": specificity, \"fpr\": fpr, \"fnr\": fnr,",
"load_model(model_hdf5) def combine_mult_and_diffref(mult, orig_inp, bg_data): to_return = [] for l in [0]: projected_hypothetical_contribs",
"from deeplift.dinuc_shuffle import dinuc_shuffle from scipy.spatial.distance import jensenshannon from scipy.special import logit, softmax",
"for i in range(cur_batch_size)] if args.dinuc_shuffle_input is True: seq_batch=[dinuc_shuffle(i) for i in seq_batch]",
"seq_batch] seq_batch=one_hot_encode(seq_batch) seq[start_index:start_index+cur_batch_size,:,:]=seq_batch #get the hypothetical scores for the batch hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None)",
"ischip=False, task_index=0) print(\"made explainers\") #read in the peaks peaks=pd.read_csv(args.peak_file,header=None,sep='\\t') nrow=peaks.shape[0] tosample=round(int(args.npeaks_to_sample)/nrow,2) peaks =",
"tensorflow.keras.models import load_model from tensorflow.keras.utils import get_custom_objects from kerasAC.custom_losses import * from kerasAC.metrics",
"= 1.0 hypothetical_difference_from_reference = ( hypothetical_input[None, :, :] - bg_data[l] ) hypothetical_contribs =",
"print(\"sampled peaks:\"+str(nrow)) #allocate space for numpy arrays for modisco hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4)) hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4)) observed_profile_scores=np.empty((nrow,2*args.flank_size,4)) observed_count_scores=np.empty((nrow,2*args.flank_size,4))",
"chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n') chrom_size_dict={} for line in chrom_sizes: tokens=line.split('\\t') chrom_size_dict[tokens[0]]=int(tokens[1]) ref=pysam.FastaFile(args.ref_fasta) # load the model",
"model model = load_model_wrapper(args.model_hdf5) print(\"loaded model\") # create the count & profile explainers",
"seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i in range(cur_batch_size)] if args.dinuc_shuffle_input is True: seq_batch=[dinuc_shuffle(i) for i in",
"shap.DeepExplainer( model_wrapper, data=create_background_atac, combine_mult_and_diffref=combine_mult_and_diffref_atac ) prof_explainer = create_explainer(model, ischip=False, task_index=0) print(\"made explainers\") #read",
"in [0]: projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype(\"float\") assert len(orig_inp[l].shape) == 2 for i in range(orig_inp[l].shape[-1]):",
"modisco hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4)) hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4)) observed_profile_scores=np.empty((nrow,2*args.flank_size,4)) observed_count_scores=np.empty((nrow,2*args.flank_size,4)) seq=np.empty((nrow,2*args.flank_size,4)) print(\"pre-allocted output arrays\") #generate one-hot-encoded inputs start_index=0",
"batch_start_pos] batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i in range(cur_batch_size)] seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i in range(cur_batch_size)] if args.dinuc_shuffle_input",
"for numpy arrays for modisco hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4)) hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4)) observed_profile_scores=np.empty((nrow,2*args.flank_size,4)) observed_count_scores=np.empty((nrow,2*args.flank_size,4)) seq=np.empty((nrow,2*args.flank_size,4)) print(\"pre-allocted output arrays\")",
"recall, \"specificity\": specificity, \"fpr\": fpr, \"fnr\": fnr, \"precision\": precision, \"f1\": f1, \"ambig_binary_crossentropy\": ambig_binary_crossentropy,",
"peaks=pd.read_csv(args.peak_file,header=None,sep='\\t') nrow=peaks.shape[0] tosample=round(int(args.npeaks_to_sample)/nrow,2) peaks = peaks.sample(frac=tosample).reset_index(drop=True) nrow=peaks.shape[0] print(\"sampled peaks:\"+str(nrow)) #allocate space for numpy",
"from kerasAC.util import * from tensorflow.keras.models import load_model from tensorflow.keras.utils import get_custom_objects from",
"tosample=round(int(args.npeaks_to_sample)/nrow,2) peaks = peaks.sample(frac=tosample).reset_index(drop=True) nrow=peaks.shape[0] print(\"sampled peaks:\"+str(nrow)) #allocate space for numpy arrays for",
"parser.add_argument(\"--chrom_sizes\", default=\"/data/hg38.chrom.sizes\") parser.add_argument(\"--flank_size\", type=int, default=1057) parser.add_argument(\"--batch_size\",type=int,default=100) return parser.parse_args() def load_model_wrapper(model_hdf5): # load the",
"the batch hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None) observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0]) observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch start_index+=args.batch_size #save print(\"saving outputs\")",
"kerasAC.metrics import * def parse_args(): parser = argparse.ArgumentParser(description=\"Argument Parser for SNP scoring\") parser.add_argument(\"--model_hdf5\")",
"seq_batch=[dinuc_shuffle(i) for i in seq_batch] seq_batch=one_hot_encode(seq_batch) seq[start_index:start_index+cur_batch_size,:,:]=seq_batch #get the hypothetical scores for the",
"import * def parse_args(): parser = argparse.ArgumentParser(description=\"Argument Parser for SNP scoring\") parser.add_argument(\"--model_hdf5\") parser.add_argument(\"--peak_file\")",
"import jensenshannon from scipy.special import logit, softmax tensorflow.compat.v1.disable_v2_behavior() import kerasAC import matplotlib import",
"ambig_mean_absolute_error, \"ambig_mean_squared_error\": ambig_mean_squared_error, \"MultichannelMultinomialNLL\": MultichannelMultinomialNLL, } get_custom_objects().update(custom_objects) return load_model(model_hdf5) def combine_mult_and_diffref(mult, orig_inp, bg_data):",
"to_return def shuffle_several_times(s): numshuffles = 20 return [ np.array([dinuc_shuffle(s[0]) for i in range(numshuffles)]),",
"nrow=peaks.shape[0] tosample=round(int(args.npeaks_to_sample)/nrow,2) peaks = peaks.sample(frac=tosample).reset_index(drop=True) nrow=peaks.shape[0] print(\"sampled peaks:\"+str(nrow)) #allocate space for numpy arrays",
"i in range(numshuffles)]), ] def main(): args = parse_args() chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n') chrom_size_dict={} for line",
"data=create_background_atac, combine_mult_and_diffref=combine_mult_and_diffref_atac ) prof_explainer = create_explainer(model, ischip=False, task_index=0) print(\"made explainers\") #read in the",
"parser.add_argument(\"--batch_size\",type=int,default=100) return parser.parse_args() def load_model_wrapper(model_hdf5): # load the model! custom_objects = { \"recall\":",
"for line in chrom_sizes: tokens=line.split('\\t') chrom_size_dict[tokens[0]]=int(tokens[1]) ref=pysam.FastaFile(args.ref_fasta) # load the model model =",
"the count & profile explainers model_wrapper = (model.input, model.outputs[1][:, 0:1]) count_explainer = shap.DeepExplainer(",
"peaks peaks=pd.read_csv(args.peak_file,header=None,sep='\\t') nrow=peaks.shape[0] tosample=round(int(args.npeaks_to_sample)/nrow,2) peaks = peaks.sample(frac=tosample).reset_index(drop=True) nrow=peaks.shape[0] print(\"sampled peaks:\"+str(nrow)) #allocate space for",
"& profile explainers model_wrapper = (model.input, model.outputs[1][:, 0:1]) count_explainer = shap.DeepExplainer( model_wrapper, data=create_background_atac,",
"create the count & profile explainers model_wrapper = (model.input, model.outputs[1][:, 0:1]) count_explainer =",
"is True: seq_batch=[dinuc_shuffle(i) for i in seq_batch] seq_batch=one_hot_encode(seq_batch) seq[start_index:start_index+cur_batch_size,:,:]=seq_batch #get the hypothetical scores",
"i in range(cur_batch_size)] if args.dinuc_shuffle_input is True: seq_batch=[dinuc_shuffle(i) for i in seq_batch] seq_batch=one_hot_encode(seq_batch)",
"#save print(\"saving outputs\") np.save(args.out_prefix+'.hyp.profile.npy',hypothetical_profile_scores) np.save(args.out_prefix+'.observed.profile.npy',observed_profile_scores) np.save(args.out_prefix+'.hyp.count.npy',hypothetical_count_scores) np.save(args.out_prefix+'.observed.count.npy',observed_count_scores) np.save(args.out_prefix+'.seq.npy',seq) if __name__ == \"__main__\": main()",
"pysam import shap import tensorflow from deeplift.dinuc_shuffle import dinuc_shuffle from scipy.spatial.distance import jensenshannon",
"recall, \"sensitivity\": recall, \"specificity\": specificity, \"fpr\": fpr, \"fnr\": fnr, \"precision\": precision, \"f1\": f1,",
"import dinuc_shuffle from scipy.spatial.distance import jensenshannon from scipy.special import logit, softmax tensorflow.compat.v1.disable_v2_behavior() import",
"default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\" ) parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False) parser.add_argument(\"--chrom_sizes\", default=\"/data/hg38.chrom.sizes\") parser.add_argument(\"--flank_size\", type=int, default=1057) parser.add_argument(\"--batch_size\",type=int,default=100) return parser.parse_args() def load_model_wrapper(model_hdf5):",
"= np.zeros_like(bg_data[l]).astype(\"float\") assert len(orig_inp[l].shape) == 2 for i in range(orig_inp[l].shape[-1]): hypothetical_input = np.zeros_like(orig_inp[l]).astype(\"float\")",
"#read in the peaks peaks=pd.read_csv(args.peak_file,header=None,sep='\\t') nrow=peaks.shape[0] tosample=round(int(args.npeaks_to_sample)/nrow,2) peaks = peaks.sample(frac=tosample).reset_index(drop=True) nrow=peaks.shape[0] print(\"sampled peaks:\"+str(nrow))",
"\"f1\": f1, \"ambig_binary_crossentropy\": ambig_binary_crossentropy, \"ambig_mean_absolute_error\": ambig_mean_absolute_error, \"ambig_mean_squared_error\": ambig_mean_squared_error, \"MultichannelMultinomialNLL\": MultichannelMultinomialNLL, } get_custom_objects().update(custom_objects) return",
"np.sum( hypothetical_contribs, axis=-1 ) to_return.append(np.mean(projected_hypothetical_contribs, axis=0)) to_return.append(np.zeros_like(orig_inp[1])) return to_return def shuffle_several_times(s): numshuffles =",
"batch_start_pos=[max(0,i) for i in batch_start_pos] batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i in range(cur_batch_size)] seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i",
"i in range(numshuffles)]), np.array([s[1] for i in range(numshuffles)]), ] def main(): args =",
"import * from kerasAC.util import * from tensorflow.keras.models import load_model from tensorflow.keras.utils import",
"the hypothetical scores for the batch hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None) observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0]) observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch",
"observed_profile_scores=np.empty((nrow,2*args.flank_size,4)) observed_count_scores=np.empty((nrow,2*args.flank_size,4)) seq=np.empty((nrow,2*args.flank_size,4)) print(\"pre-allocted output arrays\") #generate one-hot-encoded inputs start_index=0 while start_index <",
"] def main(): args = parse_args() chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n') chrom_size_dict={} for line in chrom_sizes: tokens=line.split('\\t')",
"hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4)) hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4)) observed_profile_scores=np.empty((nrow,2*args.flank_size,4)) observed_count_scores=np.empty((nrow,2*args.flank_size,4)) seq=np.empty((nrow,2*args.flank_size,4)) print(\"pre-allocted output arrays\") #generate one-hot-encoded inputs start_index=0 while",
"scipy.special import logit, softmax tensorflow.compat.v1.disable_v2_behavior() import kerasAC import matplotlib import pandas as pd",
"= shap.DeepExplainer( model_wrapper, data=create_background_atac, combine_mult_and_diffref=combine_mult_and_diffref_atac ) prof_explainer = create_explainer(model, ischip=False, task_index=0) print(\"made explainers\")",
"model\") # create the count & profile explainers model_wrapper = (model.input, model.outputs[1][:, 0:1])",
"parser = argparse.ArgumentParser(description=\"Argument Parser for SNP scoring\") parser.add_argument(\"--model_hdf5\") parser.add_argument(\"--peak_file\") parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000) parser.add_argument(\"--out_prefix\") parser.add_argument( \"--ref_fasta\",",
"bg_data[l] ) hypothetical_contribs = hypothetical_difference_from_reference * mult[l] projected_hypothetical_contribs[:, :, i] = np.sum( hypothetical_contribs,",
"= np.sum( hypothetical_contribs, axis=-1 ) to_return.append(np.mean(projected_hypothetical_contribs, axis=0)) to_return.append(np.zeros_like(orig_inp[1])) return to_return def shuffle_several_times(s): numshuffles",
"import matplotlib import pandas as pd from kerasAC.interpret.deepshap import * from kerasAC.interpret.profile_shap import",
"i] = 1.0 hypothetical_difference_from_reference = ( hypothetical_input[None, :, :] - bg_data[l] ) hypothetical_contribs",
"to_return.append(np.zeros_like(orig_inp[1])) return to_return def shuffle_several_times(s): numshuffles = 20 return [ np.array([dinuc_shuffle(s[0]) for i",
"task_index=0) print(\"made explainers\") #read in the peaks peaks=pd.read_csv(args.peak_file,header=None,sep='\\t') nrow=peaks.shape[0] tosample=round(int(args.npeaks_to_sample)/nrow,2) peaks = peaks.sample(frac=tosample).reset_index(drop=True)",
"parser.add_argument(\"--model_hdf5\") parser.add_argument(\"--peak_file\") parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000) parser.add_argument(\"--out_prefix\") parser.add_argument( \"--ref_fasta\", default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\" ) parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False) parser.add_argument(\"--chrom_sizes\", default=\"/data/hg38.chrom.sizes\") parser.add_argument(\"--flank_size\", type=int,",
"1.0 hypothetical_difference_from_reference = ( hypothetical_input[None, :, :] - bg_data[l] ) hypothetical_contribs = hypothetical_difference_from_reference",
"* from kerasAC.metrics import * def parse_args(): parser = argparse.ArgumentParser(description=\"Argument Parser for SNP",
"parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False) parser.add_argument(\"--chrom_sizes\", default=\"/data/hg38.chrom.sizes\") parser.add_argument(\"--flank_size\", type=int, default=1057) parser.add_argument(\"--batch_size\",type=int,default=100) return parser.parse_args() def load_model_wrapper(model_hdf5): # load",
"pandas as pd from kerasAC.interpret.deepshap import * from kerasAC.interpret.profile_shap import * from kerasAC.util",
"( hypothetical_input[None, :, :] - bg_data[l] ) hypothetical_contribs = hypothetical_difference_from_reference * mult[l] projected_hypothetical_contribs[:,",
"SNP scoring\") parser.add_argument(\"--model_hdf5\") parser.add_argument(\"--peak_file\") parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000) parser.add_argument(\"--out_prefix\") parser.add_argument( \"--ref_fasta\", default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\" ) parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False) parser.add_argument(\"--chrom_sizes\", default=\"/data/hg38.chrom.sizes\")",
"arrays for modisco hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4)) hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4)) observed_profile_scores=np.empty((nrow,2*args.flank_size,4)) observed_count_scores=np.empty((nrow,2*args.flank_size,4)) seq=np.empty((nrow,2*args.flank_size,4)) print(\"pre-allocted output arrays\") #generate one-hot-encoded",
"= peaks.sample(frac=tosample).reset_index(drop=True) nrow=peaks.shape[0] print(\"sampled peaks:\"+str(nrow)) #allocate space for numpy arrays for modisco hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4))",
") prof_explainer = create_explainer(model, ischip=False, task_index=0) print(\"made explainers\") #read in the peaks peaks=pd.read_csv(args.peak_file,header=None,sep='\\t')",
"#allocate space for numpy arrays for modisco hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4)) hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4)) observed_profile_scores=np.empty((nrow,2*args.flank_size,4)) observed_count_scores=np.empty((nrow,2*args.flank_size,4)) seq=np.empty((nrow,2*args.flank_size,4)) print(\"pre-allocted",
"def main(): args = parse_args() chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n') chrom_size_dict={} for line in chrom_sizes: tokens=line.split('\\t') chrom_size_dict[tokens[0]]=int(tokens[1])",
"* from kerasAC.util import * from tensorflow.keras.models import load_model from tensorflow.keras.utils import get_custom_objects",
"in range(orig_inp[l].shape[-1]): hypothetical_input = np.zeros_like(orig_inp[l]).astype(\"float\") hypothetical_input[:, i] = 1.0 hypothetical_difference_from_reference = ( hypothetical_input[None,",
"0:1]) count_explainer = shap.DeepExplainer( model_wrapper, data=create_background_atac, combine_mult_and_diffref=combine_mult_and_diffref_atac ) prof_explainer = create_explainer(model, ischip=False, task_index=0)",
"peaks.sample(frac=tosample).reset_index(drop=True) nrow=peaks.shape[0] print(\"sampled peaks:\"+str(nrow)) #allocate space for numpy arrays for modisco hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4)) hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4))",
"tensorflow.compat.v1.disable_v2_behavior() import kerasAC import matplotlib import pandas as pd from kerasAC.interpret.deepshap import *",
"in chrom_sizes: tokens=line.split('\\t') chrom_size_dict[tokens[0]]=int(tokens[1]) ref=pysam.FastaFile(args.ref_fasta) # load the model model = load_model_wrapper(args.model_hdf5) print(\"loaded",
"i in range(orig_inp[l].shape[-1]): hypothetical_input = np.zeros_like(orig_inp[l]).astype(\"float\") hypothetical_input[:, i] = 1.0 hypothetical_difference_from_reference = (",
"in batch_start_pos] batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i in range(cur_batch_size)] seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i in range(cur_batch_size)] if",
"combine_mult_and_diffref=combine_mult_and_diffref_atac ) prof_explainer = create_explainer(model, ischip=False, task_index=0) print(\"made explainers\") #read in the peaks",
"def parse_args(): parser = argparse.ArgumentParser(description=\"Argument Parser for SNP scoring\") parser.add_argument(\"--model_hdf5\") parser.add_argument(\"--peak_file\") parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000) parser.add_argument(\"--out_prefix\")",
"range(cur_batch_size)] seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i in range(cur_batch_size)] if args.dinuc_shuffle_input is True: seq_batch=[dinuc_shuffle(i) for i",
"peaks:\"+str(nrow)) #allocate space for numpy arrays for modisco hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4)) hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4)) observed_profile_scores=np.empty((nrow,2*args.flank_size,4)) observed_count_scores=np.empty((nrow,2*args.flank_size,4)) seq=np.empty((nrow,2*args.flank_size,4))",
"to_return.append(np.mean(projected_hypothetical_contribs, axis=0)) to_return.append(np.zeros_like(orig_inp[1])) return to_return def shuffle_several_times(s): numshuffles = 20 return [ np.array([dinuc_shuffle(s[0])",
"== 2 for i in range(orig_inp[l].shape[-1]): hypothetical_input = np.zeros_like(orig_inp[l]).astype(\"float\") hypothetical_input[:, i] = 1.0",
"= parse_args() chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n') chrom_size_dict={} for line in chrom_sizes: tokens=line.split('\\t') chrom_size_dict[tokens[0]]=int(tokens[1]) ref=pysam.FastaFile(args.ref_fasta) # load",
"the model! custom_objects = { \"recall\": recall, \"sensitivity\": recall, \"specificity\": specificity, \"fpr\": fpr,",
"import shap import tensorflow from deeplift.dinuc_shuffle import dinuc_shuffle from scipy.spatial.distance import jensenshannon from",
"hypothetical_difference_from_reference * mult[l] projected_hypothetical_contribs[:, :, i] = np.sum( hypothetical_contribs, axis=-1 ) to_return.append(np.mean(projected_hypothetical_contribs, axis=0))",
"axis=-1 ) to_return.append(np.mean(projected_hypothetical_contribs, axis=0)) to_return.append(np.zeros_like(orig_inp[1])) return to_return def shuffle_several_times(s): numshuffles = 20 return",
"while start_index < nrow: cur_batch_size=min(args.batch_size,nrow-start_index) print(str(start_index)+\":\"+str(start_index+cur_batch_size)) batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist() batch_start_pos=peaks[1]+peaks[9]-args.flank_size batch_start_pos=batch_start_pos.tolist() batch_start_pos=[max(0,i) for i in",
"hypothetical_input = np.zeros_like(orig_inp[l]).astype(\"float\") hypothetical_input[:, i] = 1.0 hypothetical_difference_from_reference = ( hypothetical_input[None, :, :]",
"kerasAC.interpret.profile_shap import * from kerasAC.util import * from tensorflow.keras.models import load_model from tensorflow.keras.utils",
"[0]: projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype(\"float\") assert len(orig_inp[l].shape) == 2 for i in range(orig_inp[l].shape[-1]): hypothetical_input",
") parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False) parser.add_argument(\"--chrom_sizes\", default=\"/data/hg38.chrom.sizes\") parser.add_argument(\"--flank_size\", type=int, default=1057) parser.add_argument(\"--batch_size\",type=int,default=100) return parser.parse_args() def load_model_wrapper(model_hdf5): #",
"axis=0)) to_return.append(np.zeros_like(orig_inp[1])) return to_return def shuffle_several_times(s): numshuffles = 20 return [ np.array([dinuc_shuffle(s[0]) for",
"hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None) observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0]) observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch start_index+=args.batch_size #save print(\"saving outputs\") np.save(args.out_prefix+'.hyp.profile.npy',hypothetical_profile_scores) np.save(args.out_prefix+'.observed.profile.npy',observed_profile_scores)",
"= [] for l in [0]: projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype(\"float\") assert len(orig_inp[l].shape) == 2",
"numshuffles = 20 return [ np.array([dinuc_shuffle(s[0]) for i in range(numshuffles)]), np.array([s[1] for i",
"= argparse.ArgumentParser(description=\"Argument Parser for SNP scoring\") parser.add_argument(\"--model_hdf5\") parser.add_argument(\"--peak_file\") parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000) parser.add_argument(\"--out_prefix\") parser.add_argument( \"--ref_fasta\", default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\"",
"import kerasAC import matplotlib import pandas as pd from kerasAC.interpret.deepshap import * from",
"\"MultichannelMultinomialNLL\": MultichannelMultinomialNLL, } get_custom_objects().update(custom_objects) return load_model(model_hdf5) def combine_mult_and_diffref(mult, orig_inp, bg_data): to_return = []",
"chrom_sizes: tokens=line.split('\\t') chrom_size_dict[tokens[0]]=int(tokens[1]) ref=pysam.FastaFile(args.ref_fasta) # load the model model = load_model_wrapper(args.model_hdf5) print(\"loaded model\")",
"start_index+=args.batch_size #save print(\"saving outputs\") np.save(args.out_prefix+'.hyp.profile.npy',hypothetical_profile_scores) np.save(args.out_prefix+'.observed.profile.npy',observed_profile_scores) np.save(args.out_prefix+'.hyp.count.npy',hypothetical_count_scores) np.save(args.out_prefix+'.observed.count.npy',observed_count_scores) np.save(args.out_prefix+'.seq.npy',seq) if __name__ == \"__main__\":",
"# create the count & profile explainers model_wrapper = (model.input, model.outputs[1][:, 0:1]) count_explainer",
"#generate one-hot-encoded inputs start_index=0 while start_index < nrow: cur_batch_size=min(args.batch_size,nrow-start_index) print(str(start_index)+\":\"+str(start_index+cur_batch_size)) batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist() batch_start_pos=peaks[1]+peaks[9]-args.flank_size batch_start_pos=batch_start_pos.tolist()",
"[ np.array([dinuc_shuffle(s[0]) for i in range(numshuffles)]), np.array([s[1] for i in range(numshuffles)]), ] def",
"parser.add_argument(\"--peak_file\") parser.add_argument(\"--npeaks_to_sample\",type=int,default=30000) parser.add_argument(\"--out_prefix\") parser.add_argument( \"--ref_fasta\", default=\"/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta\" ) parser.add_argument(\"--dinuc_shuffle_input\",action='store_true',default=False) parser.add_argument(\"--chrom_sizes\", default=\"/data/hg38.chrom.sizes\") parser.add_argument(\"--flank_size\", type=int, default=1057)",
"assert len(orig_inp[l].shape) == 2 for i in range(orig_inp[l].shape[-1]): hypothetical_input = np.zeros_like(orig_inp[l]).astype(\"float\") hypothetical_input[:, i]",
"import * from kerasAC.metrics import * def parse_args(): parser = argparse.ArgumentParser(description=\"Argument Parser for",
"\"ambig_mean_absolute_error\": ambig_mean_absolute_error, \"ambig_mean_squared_error\": ambig_mean_squared_error, \"MultichannelMultinomialNLL\": MultichannelMultinomialNLL, } get_custom_objects().update(custom_objects) return load_model(model_hdf5) def combine_mult_and_diffref(mult, orig_inp,",
"logit, softmax tensorflow.compat.v1.disable_v2_behavior() import kerasAC import matplotlib import pandas as pd from kerasAC.interpret.deepshap",
"= { \"recall\": recall, \"sensitivity\": recall, \"specificity\": specificity, \"fpr\": fpr, \"fnr\": fnr, \"precision\":",
"fpr, \"fnr\": fnr, \"precision\": precision, \"f1\": f1, \"ambig_binary_crossentropy\": ambig_binary_crossentropy, \"ambig_mean_absolute_error\": ambig_mean_absolute_error, \"ambig_mean_squared_error\": ambig_mean_squared_error,",
"matplotlib import pandas as pd from kerasAC.interpret.deepshap import * from kerasAC.interpret.profile_shap import *",
"= 20 return [ np.array([dinuc_shuffle(s[0]) for i in range(numshuffles)]), np.array([s[1] for i in",
"range(numshuffles)]), np.array([s[1] for i in range(numshuffles)]), ] def main(): args = parse_args() chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\\n')",
"start_index=0 while start_index < nrow: cur_batch_size=min(args.batch_size,nrow-start_index) print(str(start_index)+\":\"+str(start_index+cur_batch_size)) batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist() batch_start_pos=peaks[1]+peaks[9]-args.flank_size batch_start_pos=batch_start_pos.tolist() batch_start_pos=[max(0,i) for i",
"output arrays\") #generate one-hot-encoded inputs start_index=0 while start_index < nrow: cur_batch_size=min(args.batch_size,nrow-start_index) print(str(start_index)+\":\"+str(start_index+cur_batch_size)) batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist()"
] |
[
"nowym boku, # będącym sumą boków dodawanych do siebie kwadratów. class Ksztalty: def",
"dodaj_opis(self, text): self.opis = text def skalowanie(self, czynnik): self.x = self.x * czynnik",
"= Kwadrat(kw1 + kw2) kw4 = Kwadrat(kw1 + kw3) print('kw1 ma wymiary:', kw1.x,'x',",
"Przeciąż metodę __add__() dla klasy Kwadrat, # która będzie zwracała instancje klasy Kwadrat",
"self.x * self.y def obwod(self): return 2 * self.x + 2 * self.y",
"+ other.x kw1 = Kwadrat(5) kw2 = Kwadrat(6) kw3 = Kwadrat(kw1 + kw2)",
"* czynnik self.x = self.y * czynnik class Kwadrat(Ksztalty): def __init__(self, x): self.x",
"* czynnik class Kwadrat(Ksztalty): def __init__(self, x): self.x = x self.y = x",
"ma wymiary:', kw1.x,'x', kw1.y) print('kw2 ma wymiary:', kw2.x,'x', kw2.y) print('kw3 ma wymiary:', kw3.x,'x',",
"kw2 = Kwadrat(6) kw3 = Kwadrat(kw1 + kw2) kw4 = Kwadrat(kw1 + kw3)",
"2 * self.x + 2 * self.y def dodaj_opis(self, text): self.opis = text",
"class Ksztalty: def __init__(self, x, y): self.x=x self.y=y self.opis = \"To będzie klasa",
"która będzie zwracała instancje klasy Kwadrat o nowym boku, # będącym sumą boków",
"text def skalowanie(self, czynnik): self.x = self.x * czynnik self.x = self.y *",
"= Kwadrat(kw1 + kw3) print('kw1 ma wymiary:', kw1.x,'x', kw1.y) print('kw2 ma wymiary:', kw2.x,'x',",
"boków dodawanych do siebie kwadratów. class Ksztalty: def __init__(self, x, y): self.x=x self.y=y",
"return 2 * self.x + 2 * self.y def dodaj_opis(self, text): self.opis =",
"dodawanych do siebie kwadratów. class Ksztalty: def __init__(self, x, y): self.x=x self.y=y self.opis",
"def skalowanie(self, czynnik): self.x = self.x * czynnik self.x = self.y * czynnik",
"self.opis = \"To będzie klasa dla ogólnych kształtów\" def pole(self): return self.x *",
"return self.x * self.y def obwod(self): return 2 * self.x + 2 *",
"= x def __add__(self, other): return self.x + other.x kw1 = Kwadrat(5) kw2",
"będącym sumą boków dodawanych do siebie kwadratów. class Ksztalty: def __init__(self, x, y):",
"kw3 = Kwadrat(kw1 + kw2) kw4 = Kwadrat(kw1 + kw3) print('kw1 ma wymiary:',",
"o nowym boku, # będącym sumą boków dodawanych do siebie kwadratów. class Ksztalty:",
"self.y=y self.opis = \"To będzie klasa dla ogólnych kształtów\" def pole(self): return self.x",
"= Kwadrat(5) kw2 = Kwadrat(6) kw3 = Kwadrat(kw1 + kw2) kw4 = Kwadrat(kw1",
"= text def skalowanie(self, czynnik): self.x = self.x * czynnik self.x = self.y",
"kw3) print('kw1 ma wymiary:', kw1.x,'x', kw1.y) print('kw2 ma wymiary:', kw2.x,'x', kw2.y) print('kw3 ma",
"do siebie kwadratów. class Ksztalty: def __init__(self, x, y): self.x=x self.y=y self.opis =",
"self.y * czynnik class Kwadrat(Ksztalty): def __init__(self, x): self.x = x self.y =",
"= Kwadrat(6) kw3 = Kwadrat(kw1 + kw2) kw4 = Kwadrat(kw1 + kw3) print('kw1",
"# będącym sumą boków dodawanych do siebie kwadratów. class Ksztalty: def __init__(self, x,",
"__add__(self, other): return self.x + other.x kw1 = Kwadrat(5) kw2 = Kwadrat(6) kw3",
"zwracała instancje klasy Kwadrat o nowym boku, # będącym sumą boków dodawanych do",
"Kwadrat o nowym boku, # będącym sumą boków dodawanych do siebie kwadratów. class",
"def obwod(self): return 2 * self.x + 2 * self.y def dodaj_opis(self, text):",
"boku, # będącym sumą boków dodawanych do siebie kwadratów. class Ksztalty: def __init__(self,",
"self.y def dodaj_opis(self, text): self.opis = text def skalowanie(self, czynnik): self.x = self.x",
"__init__(self, x): self.x = x self.y = x def __add__(self, other): return self.x",
"= self.y * czynnik class Kwadrat(Ksztalty): def __init__(self, x): self.x = x self.y",
"self.y = x def __add__(self, other): return self.x + other.x kw1 = Kwadrat(5)",
"self.x=x self.y=y self.opis = \"To będzie klasa dla ogólnych kształtów\" def pole(self): return",
"kw2) kw4 = Kwadrat(kw1 + kw3) print('kw1 ma wymiary:', kw1.x,'x', kw1.y) print('kw2 ma",
"wymiary:', kw1.x,'x', kw1.y) print('kw2 ma wymiary:', kw2.x,'x', kw2.y) print('kw3 ma wymiary:', kw3.x,'x', kw3.y)",
"+ kw2) kw4 = Kwadrat(kw1 + kw3) print('kw1 ma wymiary:', kw1.x,'x', kw1.y) print('kw2",
"pole(self): return self.x * self.y def obwod(self): return 2 * self.x + 2",
"będzie zwracała instancje klasy Kwadrat o nowym boku, # będącym sumą boków dodawanych",
"x, y): self.x=x self.y=y self.opis = \"To będzie klasa dla ogólnych kształtów\" def",
"class Kwadrat(Ksztalty): def __init__(self, x): self.x = x self.y = x def __add__(self,",
"# Przeciąż metodę __add__() dla klasy Kwadrat, # która będzie zwracała instancje klasy",
"self.x = x self.y = x def __add__(self, other): return self.x + other.x",
"self.x + other.x kw1 = Kwadrat(5) kw2 = Kwadrat(6) kw3 = Kwadrat(kw1 +",
"klasy Kwadrat, # która będzie zwracała instancje klasy Kwadrat o nowym boku, #",
"Kwadrat(kw1 + kw3) print('kw1 ma wymiary:', kw1.x,'x', kw1.y) print('kw2 ma wymiary:', kw2.x,'x', kw2.y)",
"self.x * czynnik self.x = self.y * czynnik class Kwadrat(Ksztalty): def __init__(self, x):",
"__init__(self, x, y): self.x=x self.y=y self.opis = \"To będzie klasa dla ogólnych kształtów\"",
"skalowanie(self, czynnik): self.x = self.x * czynnik self.x = self.y * czynnik class",
"self.x = self.x * czynnik self.x = self.y * czynnik class Kwadrat(Ksztalty): def",
"= \"To będzie klasa dla ogólnych kształtów\" def pole(self): return self.x * self.y",
"kw1 = Kwadrat(5) kw2 = Kwadrat(6) kw3 = Kwadrat(kw1 + kw2) kw4 =",
"kształtów\" def pole(self): return self.x * self.y def obwod(self): return 2 * self.x",
"Kwadrat(kw1 + kw2) kw4 = Kwadrat(kw1 + kw3) print('kw1 ma wymiary:', kw1.x,'x', kw1.y)",
"# która będzie zwracała instancje klasy Kwadrat o nowym boku, # będącym sumą",
"Ksztalty: def __init__(self, x, y): self.x=x self.y=y self.opis = \"To będzie klasa dla",
"x self.y = x def __add__(self, other): return self.x + other.x kw1 =",
"def pole(self): return self.x * self.y def obwod(self): return 2 * self.x +",
"instancje klasy Kwadrat o nowym boku, # będącym sumą boków dodawanych do siebie",
"Kwadrat(Ksztalty): def __init__(self, x): self.x = x self.y = x def __add__(self, other):",
"Kwadrat(5) kw2 = Kwadrat(6) kw3 = Kwadrat(kw1 + kw2) kw4 = Kwadrat(kw1 +",
"x): self.x = x self.y = x def __add__(self, other): return self.x +",
"kw1.x,'x', kw1.y) print('kw2 ma wymiary:', kw2.x,'x', kw2.y) print('kw3 ma wymiary:', kw3.x,'x', kw3.y) print('kw4",
"def __init__(self, x, y): self.x=x self.y=y self.opis = \"To będzie klasa dla ogólnych",
"czynnik): self.x = self.x * czynnik self.x = self.y * czynnik class Kwadrat(Ksztalty):",
"def __add__(self, other): return self.x + other.x kw1 = Kwadrat(5) kw2 = Kwadrat(6)",
"2 * self.y def dodaj_opis(self, text): self.opis = text def skalowanie(self, czynnik): self.x",
"other.x kw1 = Kwadrat(5) kw2 = Kwadrat(6) kw3 = Kwadrat(kw1 + kw2) kw4",
"czynnik self.x = self.y * czynnik class Kwadrat(Ksztalty): def __init__(self, x): self.x =",
"klasy Kwadrat o nowym boku, # będącym sumą boków dodawanych do siebie kwadratów.",
"* self.x + 2 * self.y def dodaj_opis(self, text): self.opis = text def",
"czynnik class Kwadrat(Ksztalty): def __init__(self, x): self.x = x self.y = x def",
"+ kw3) print('kw1 ma wymiary:', kw1.x,'x', kw1.y) print('kw2 ma wymiary:', kw2.x,'x', kw2.y) print('kw3",
"y): self.x=x self.y=y self.opis = \"To będzie klasa dla ogólnych kształtów\" def pole(self):",
"obwod(self): return 2 * self.x + 2 * self.y def dodaj_opis(self, text): self.opis",
"= self.x * czynnik self.x = self.y * czynnik class Kwadrat(Ksztalty): def __init__(self,",
"Kwadrat, # która będzie zwracała instancje klasy Kwadrat o nowym boku, # będącym",
"kwadratów. class Ksztalty: def __init__(self, x, y): self.x=x self.y=y self.opis = \"To będzie",
"return self.x + other.x kw1 = Kwadrat(5) kw2 = Kwadrat(6) kw3 = Kwadrat(kw1",
"dla klasy Kwadrat, # która będzie zwracała instancje klasy Kwadrat o nowym boku,",
"def dodaj_opis(self, text): self.opis = text def skalowanie(self, czynnik): self.x = self.x *",
"ma wymiary:', kw2.x,'x', kw2.y) print('kw3 ma wymiary:', kw3.x,'x', kw3.y) print('kw4 ma wymiary:', kw4.x,'x',",
"\"To będzie klasa dla ogólnych kształtów\" def pole(self): return self.x * self.y def",
"def __init__(self, x): self.x = x self.y = x def __add__(self, other): return",
"kw4 = Kwadrat(kw1 + kw3) print('kw1 ma wymiary:', kw1.x,'x', kw1.y) print('kw2 ma wymiary:',",
"sumą boków dodawanych do siebie kwadratów. class Ksztalty: def __init__(self, x, y): self.x=x",
"text): self.opis = text def skalowanie(self, czynnik): self.x = self.x * czynnik self.x",
"* self.y def obwod(self): return 2 * self.x + 2 * self.y def",
"siebie kwadratów. class Ksztalty: def __init__(self, x, y): self.x=x self.y=y self.opis = \"To",
"self.opis = text def skalowanie(self, czynnik): self.x = self.x * czynnik self.x =",
"klasa dla ogólnych kształtów\" def pole(self): return self.x * self.y def obwod(self): return",
"self.y def obwod(self): return 2 * self.x + 2 * self.y def dodaj_opis(self,",
"wymiary:', kw2.x,'x', kw2.y) print('kw3 ma wymiary:', kw3.x,'x', kw3.y) print('kw4 ma wymiary:', kw4.x,'x', kw4.y)",
"print('kw2 ma wymiary:', kw2.x,'x', kw2.y) print('kw3 ma wymiary:', kw3.x,'x', kw3.y) print('kw4 ma wymiary:',",
"będzie klasa dla ogólnych kształtów\" def pole(self): return self.x * self.y def obwod(self):",
"kw1.y) print('kw2 ma wymiary:', kw2.x,'x', kw2.y) print('kw3 ma wymiary:', kw3.x,'x', kw3.y) print('kw4 ma",
"__add__() dla klasy Kwadrat, # która będzie zwracała instancje klasy Kwadrat o nowym",
"metodę __add__() dla klasy Kwadrat, # która będzie zwracała instancje klasy Kwadrat o",
"+ 2 * self.y def dodaj_opis(self, text): self.opis = text def skalowanie(self, czynnik):",
"* self.y def dodaj_opis(self, text): self.opis = text def skalowanie(self, czynnik): self.x =",
"print('kw1 ma wymiary:', kw1.x,'x', kw1.y) print('kw2 ma wymiary:', kw2.x,'x', kw2.y) print('kw3 ma wymiary:',",
"Kwadrat(6) kw3 = Kwadrat(kw1 + kw2) kw4 = Kwadrat(kw1 + kw3) print('kw1 ma",
"self.x + 2 * self.y def dodaj_opis(self, text): self.opis = text def skalowanie(self,",
"dla ogólnych kształtów\" def pole(self): return self.x * self.y def obwod(self): return 2",
"x def __add__(self, other): return self.x + other.x kw1 = Kwadrat(5) kw2 =",
"other): return self.x + other.x kw1 = Kwadrat(5) kw2 = Kwadrat(6) kw3 =",
"= x self.y = x def __add__(self, other): return self.x + other.x kw1",
"self.x = self.y * czynnik class Kwadrat(Ksztalty): def __init__(self, x): self.x = x",
"ogólnych kształtów\" def pole(self): return self.x * self.y def obwod(self): return 2 *"
] |
[
"your algorithm by explicitly filling out the A[i, j] table for the following",
"sum(ski_heights), name=\"z\") model = Model(minimize=z) # constraints model += [AllDifferent(x)] # model +=",
"differences:\", z.value()) for i in range(num_skiers): x_val = x[i].value() ski_height = ski_heights[x[i].value()] diff",
"4, 7, 11, 18] # which ski to choose for each skier x",
"by explicitly filling out the A[i, j] table for the following sample data:",
"a skier and his or her skis to be the absolute value of",
"= x[i].value() ski_height = ski_heights[x[i].value()] diff = ski_height - skier_heights[i] print('Skier %i: Ski",
"table for the following sample data: * Ski heights: 1, 2, 5, 7,",
"in the winter you've decided to become a ski bum. You've hooked up",
"out the A[i, j] table for the following sample data: * Ski heights:",
"the following sample data: * Ski heights: 1, 2, 5, 7, 13, 21.",
"an algorithm to assign skis to skiers. Ideally, each skier should obtain a",
"+= [z == sum([abs(Element(ski_heights,x[i]) - skier_heights[i]) for i in range(num_skiers)] )] ss =",
"intvar(0, sum(ski_heights), name=\"z\") model = Model(minimize=z) # constraints model += [AllDifferent(x)] # model",
"decided to become a ski bum. You've hooked up with the Mount Baldy",
"Mount Baldy Ski Resort. They'll let you ski all winter for free in",
"%\\ (i, x_val, ski_height, diff)) print() print() print('num_solutions:', num_solutions) return ss ss =",
"disparities. ... Illustrate your algorithm by explicitly filling out the A[i, j] table",
"range(num_skiers)] )] ss = CPM_ortools(model) num_solutions = 0 if ss.solve(): num_solutions += 1",
"Ideally, each skier should obtain a pair of skis whose height matches his",
"From <NAME>, Jr.: PIC 60, Fall 2008 Final Review, December 12, 2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf",
"length %2i (diff: %2i)' %\\ (i, x_val, ski_height, diff)) print() print() print('num_solutions:', num_solutions)",
"Ski assignment in cpmpy From <NAME>, Jr.: PIC 60, Fall 2008 Final Review,",
"# model += [z == sum([abs(ski_heights[x[i]] - skier_heights[i]) for i in range(num_skiers)] )]",
"assignment in cpmpy From <NAME>, Jr.: PIC 60, Fall 2008 Final Review, December",
"that minimizes the sum of the disparities. ... Illustrate your algorithm by explicitly",
"the disparities. ... Illustrate your algorithm by explicitly filling out the A[i, j]",
"heights: 3, 4, 7, 11, 18. ''' This cpmpy model was written by",
"= 5 ski_heights = [1, 2, 5, 7, 13, 21] skier_heights = [3,",
"''' This cpmpy model was written by <NAME> (<EMAIL>) See also my cpmpy",
"of skis to skiers that minimizes the sum of the disparities. ... Illustrate",
"exchange for helping their ski rental shop with an algorithm to assign skis",
"in cpmpy From <NAME>, Jr.: PIC 60, Fall 2008 Final Review, December 12,",
"skier x = intvar(0,num_skis-1,shape=num_skiers,name=\"x\") z = intvar(0, sum(ski_heights), name=\"z\") model = Model(minimize=z) #",
"pair of skis whose height matches his or her own height exactly. Unfortunately,",
"December 12, 2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf ''' 5. Ski Optimization! Your job at Snapple is",
"intvar(0,num_skis-1,shape=num_skiers,name=\"x\") z = intvar(0, sum(ski_heights), name=\"z\") model = Model(minimize=z) # constraints model +=",
"matches his or her own height exactly. Unfortunately, this is generally not possible.",
"a ski bum. You've hooked up with the Mount Baldy Ski Resort. They'll",
"ss.solve(): num_solutions += 1 print(\"total differences:\", z.value()) for i in range(num_skiers): x_val =",
"sum([abs(Element(ski_heights,x[i]) - skier_heights[i]) for i in range(num_skiers)] )] ss = CPM_ortools(model) num_solutions =",
"algorithm by explicitly filling out the A[i, j] table for the following sample",
"this is generally not possible. We define the disparity between a skier and",
"following sample data: * Ski heights: 1, 2, 5, 7, 13, 21. *",
"page: http://hakank.org/cpmpy/ \"\"\" from cpmpy import * import cpmpy.solvers import numpy as np",
"between a skier and his or her skis to be the absolute value",
"to skiers that minimizes the sum of the disparities. ... Illustrate your algorithm",
"for i in range(num_skiers)] )] model += [z == sum([abs(Element(ski_heights,x[i]) - skier_heights[i]) for",
"cpmpy.solvers import numpy as np from cpmpy_hakank import * def ski_assignment(): # data",
"ski_assignment(): # data num_skis = 6 num_skiers = 5 ski_heights = [1, 2,",
"11, 18] # which ski to choose for each skier x = intvar(0,num_skis-1,shape=num_skiers,name=\"x\")",
"skier_heights[i]) for i in range(num_skiers)] )] ss = CPM_ortools(model) num_solutions = 0 if",
"cpmpy model was written by <NAME> (<EMAIL>) See also my cpmpy page: http://hakank.org/cpmpy/",
"assignment of skis to skiers that minimizes the sum of the disparities. ...",
"Final Review, December 12, 2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf ''' 5. Ski Optimization! Your job at",
"skis to skiers. Ideally, each skier should obtain a pair of skis whose",
"assign skis to skiers. Ideally, each skier should obtain a pair of skis",
"4, 7, 11, 18. ''' This cpmpy model was written by <NAME> (<EMAIL>)",
"= ski_height - skier_heights[i] print('Skier %i: Ski %i with length %2i (diff: %2i)'",
"Review, December 12, 2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf ''' 5. Ski Optimization! Your job at Snapple",
"2, 5, 7, 13, 21] skier_heights = [3, 4, 7, 11, 18] #",
"skier should obtain a pair of skis whose height matches his or her",
"21. * Skier heights: 3, 4, 7, 11, 18. ''' This cpmpy model",
"by <NAME> (<EMAIL>) See also my cpmpy page: http://hakank.org/cpmpy/ \"\"\" from cpmpy import",
"minimizes the sum of the disparities. ... Illustrate your algorithm by explicitly filling",
"skier_heights = [3, 4, 7, 11, 18] # which ski to choose for",
"her skis to be the absolute value of the difference between the height",
"(diff: %2i)' %\\ (i, x_val, ski_height, diff)) print() print() print('num_solutions:', num_solutions) return ss",
"his or her skis to be the absolute value of the difference between",
"bum. You've hooked up with the Mount Baldy Ski Resort. They'll let you",
"j] table for the following sample data: * Ski heights: 1, 2, 5,",
"%i: Ski %i with length %2i (diff: %2i)' %\\ (i, x_val, ski_height, diff))",
"is generally not possible. We define the disparity between a skier and his",
"skiers that minimizes the sum of the disparities. ... Illustrate your algorithm by",
"18] # which ski to choose for each skier x = intvar(0,num_skis-1,shape=num_skiers,name=\"x\") z",
"Fall 2008 Final Review, December 12, 2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf ''' 5. Ski Optimization! Your",
"shop with an algorithm to assign skis to skiers. Ideally, each skier should",
"which ski to choose for each skier x = intvar(0,num_skis-1,shape=num_skiers,name=\"x\") z = intvar(0,",
"cpmpy From <NAME>, Jr.: PIC 60, Fall 2008 Final Review, December 12, 2008",
"%i with length %2i (diff: %2i)' %\\ (i, x_val, ski_height, diff)) print() print()",
"the A[i, j] table for the following sample data: * Ski heights: 1,",
"[3, 4, 7, 11, 18] # which ski to choose for each skier",
"model was written by <NAME> (<EMAIL>) See also my cpmpy page: http://hakank.org/cpmpy/ \"\"\"",
"obtain a pair of skis whose height matches his or her own height",
"the winter you've decided to become a ski bum. You've hooked up with",
"with an algorithm to assign skis to skiers. Ideally, each skier should obtain",
"generally not possible. We define the disparity between a skier and his or",
"%2i (diff: %2i)' %\\ (i, x_val, ski_height, diff)) print() print() print('num_solutions:', num_solutions) return",
"6 num_skiers = 5 ski_heights = [1, 2, 5, 7, 13, 21] skier_heights",
"Skier heights: 3, 4, 7, 11, 18. ''' This cpmpy model was written",
"or her skis to be the absolute value of the difference between the",
"model += [z == sum([abs(Element(ski_heights,x[i]) - skier_heights[i]) for i in range(num_skiers)] )] ss",
"their ski rental shop with an algorithm to assign skis to skiers. Ideally,",
"to find an assignment of skis to skiers that minimizes the sum of",
"ski rental shop with an algorithm to assign skis to skiers. Ideally, each",
"... Illustrate your algorithm by explicitly filling out the A[i, j] table for",
"with length %2i (diff: %2i)' %\\ (i, x_val, ski_height, diff)) print() print() print('num_solutions:',",
"skiers. Ideally, each skier should obtain a pair of skis whose height matches",
"0 if ss.solve(): num_solutions += 1 print(\"total differences:\", z.value()) for i in range(num_skiers):",
"for helping their ski rental shop with an algorithm to assign skis to",
"print('Skier %i: Ski %i with length %2i (diff: %2i)' %\\ (i, x_val, ski_height,",
"60, Fall 2008 Final Review, December 12, 2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf ''' 5. Ski Optimization!",
"heights: 1, 2, 5, 7, 13, 21. * Skier heights: 3, 4, 7,",
"also my cpmpy page: http://hakank.org/cpmpy/ \"\"\" from cpmpy import * import cpmpy.solvers import",
"+= 1 print(\"total differences:\", z.value()) for i in range(num_skiers): x_val = x[i].value() ski_height",
"the skier and the pair of skis. Our objective is to find an",
"ss = CPM_ortools(model) num_solutions = 0 if ss.solve(): num_solutions += 1 print(\"total differences:\",",
"free in exchange for helping their ski rental shop with an algorithm to",
"diff = ski_height - skier_heights[i] print('Skier %i: Ski %i with length %2i (diff:",
"let you ski all winter for free in exchange for helping their ski",
"rental shop with an algorithm to assign skis to skiers. Ideally, each skier",
"They'll let you ski all winter for free in exchange for helping their",
"the disparity between a skier and his or her skis to be the",
"i in range(num_skiers): x_val = x[i].value() ski_height = ski_heights[x[i].value()] diff = ski_height -",
"2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf ''' 5. Ski Optimization! Your job at Snapple is pleasant but",
"job at Snapple is pleasant but in the winter you've decided to become",
"for free in exchange for helping their ski rental shop with an algorithm",
"her own height exactly. Unfortunately, this is generally not possible. We define the",
"the Mount Baldy Ski Resort. They'll let you ski all winter for free",
"import * def ski_assignment(): # data num_skis = 6 num_skiers = 5 ski_heights",
"to choose for each skier x = intvar(0,num_skis-1,shape=num_skiers,name=\"x\") z = intvar(0, sum(ski_heights), name=\"z\")",
"skier_heights[i]) for i in range(num_skiers)] )] model += [z == sum([abs(Element(ski_heights,x[i]) - skier_heights[i])",
"PIC 60, Fall 2008 Final Review, December 12, 2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf ''' 5. Ski",
"7, 13, 21. * Skier heights: 3, 4, 7, 11, 18. ''' This",
"become a ski bum. You've hooked up with the Mount Baldy Ski Resort.",
"7, 11, 18] # which ski to choose for each skier x =",
"[z == sum([abs(ski_heights[x[i]] - skier_heights[i]) for i in range(num_skiers)] )] model += [z",
"at Snapple is pleasant but in the winter you've decided to become a",
"skier and the pair of skis. Our objective is to find an assignment",
"be the absolute value of the difference between the height of the skier",
"== sum([abs(ski_heights[x[i]] - skier_heights[i]) for i in range(num_skiers)] )] model += [z ==",
"Resort. They'll let you ski all winter for free in exchange for helping",
"skier_heights[i] print('Skier %i: Ski %i with length %2i (diff: %2i)' %\\ (i, x_val,",
"num_skis = 6 num_skiers = 5 ski_heights = [1, 2, 5, 7, 13,",
"import * import cpmpy.solvers import numpy as np from cpmpy_hakank import * def",
"z = intvar(0, sum(ski_heights), name=\"z\") model = Model(minimize=z) # constraints model += [AllDifferent(x)]",
")] ss = CPM_ortools(model) num_solutions = 0 if ss.solve(): num_solutions += 1 print(\"total",
"12, 2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf ''' 5. Ski Optimization! Your job at Snapple is pleasant",
"should obtain a pair of skis whose height matches his or her own",
"range(num_skiers): x_val = x[i].value() ski_height = ski_heights[x[i].value()] diff = ski_height - skier_heights[i] print('Skier",
"Illustrate your algorithm by explicitly filling out the A[i, j] table for the",
"[1, 2, 5, 7, 13, 21] skier_heights = [3, 4, 7, 11, 18]",
"= ski_heights[x[i].value()] diff = ski_height - skier_heights[i] print('Skier %i: Ski %i with length",
"from cpmpy_hakank import * def ski_assignment(): # data num_skis = 6 num_skiers =",
"up with the Mount Baldy Ski Resort. They'll let you ski all winter",
"height matches his or her own height exactly. Unfortunately, this is generally not",
"sum of the disparities. ... Illustrate your algorithm by explicitly filling out the",
"skis to skiers that minimizes the sum of the disparities. ... Illustrate your",
"= 0 if ss.solve(): num_solutions += 1 print(\"total differences:\", z.value()) for i in",
"(i, x_val, ski_height, diff)) print() print() print('num_solutions:', num_solutions) return ss ss = ski_assignment()",
"of the difference between the height of the skier and the pair of",
"Baldy Ski Resort. They'll let you ski all winter for free in exchange",
"explicitly filling out the A[i, j] table for the following sample data: *",
"all winter for free in exchange for helping their ski rental shop with",
"for i in range(num_skiers)] )] ss = CPM_ortools(model) num_solutions = 0 if ss.solve():",
"and his or her skis to be the absolute value of the difference",
"to become a ski bum. You've hooked up with the Mount Baldy Ski",
"if ss.solve(): num_solutions += 1 print(\"total differences:\", z.value()) for i in range(num_skiers): x_val",
"from cpmpy import * import cpmpy.solvers import numpy as np from cpmpy_hakank import",
"http://hakank.org/cpmpy/ \"\"\" from cpmpy import * import cpmpy.solvers import numpy as np from",
"<gh_stars>100-1000 \"\"\" Ski assignment in cpmpy From <NAME>, Jr.: PIC 60, Fall 2008",
"skis whose height matches his or her own height exactly. Unfortunately, this is",
"Unfortunately, this is generally not possible. We define the disparity between a skier",
"of skis. Our objective is to find an assignment of skis to skiers",
"\"\"\" Ski assignment in cpmpy From <NAME>, Jr.: PIC 60, Fall 2008 Final",
"x[i].value() ski_height = ski_heights[x[i].value()] diff = ski_height - skier_heights[i] print('Skier %i: Ski %i",
"disparity between a skier and his or her skis to be the absolute",
"a pair of skis whose height matches his or her own height exactly.",
"own height exactly. Unfortunately, this is generally not possible. We define the disparity",
"filling out the A[i, j] table for the following sample data: * Ski",
"5, 7, 13, 21. * Skier heights: 3, 4, 7, 11, 18. '''",
"# which ski to choose for each skier x = intvar(0,num_skis-1,shape=num_skiers,name=\"x\") z =",
"= Model(minimize=z) # constraints model += [AllDifferent(x)] # model += [z == sum([abs(ski_heights[x[i]]",
")] model += [z == sum([abs(Element(ski_heights,x[i]) - skier_heights[i]) for i in range(num_skiers)] )]",
"[AllDifferent(x)] # model += [z == sum([abs(ski_heights[x[i]] - skier_heights[i]) for i in range(num_skiers)]",
"pleasant but in the winter you've decided to become a ski bum. You've",
"2008 Final Review, December 12, 2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf ''' 5. Ski Optimization! Your job",
"data: * Ski heights: 1, 2, 5, 7, 13, 21. * Skier heights:",
"A[i, j] table for the following sample data: * Ski heights: 1, 2,",
"to assign skis to skiers. Ideally, each skier should obtain a pair of",
"in range(num_skiers)] )] model += [z == sum([abs(Element(ski_heights,x[i]) - skier_heights[i]) for i in",
"+= [z == sum([abs(ski_heights[x[i]] - skier_heights[i]) for i in range(num_skiers)] )] model +=",
"and the pair of skis. Our objective is to find an assignment of",
"model = Model(minimize=z) # constraints model += [AllDifferent(x)] # model += [z ==",
"2, 5, 7, 13, 21. * Skier heights: 3, 4, 7, 11, 18.",
"difference between the height of the skier and the pair of skis. Our",
"choose for each skier x = intvar(0,num_skis-1,shape=num_skiers,name=\"x\") z = intvar(0, sum(ski_heights), name=\"z\") model",
"num_solutions += 1 print(\"total differences:\", z.value()) for i in range(num_skiers): x_val = x[i].value()",
"Your job at Snapple is pleasant but in the winter you've decided to",
"is pleasant but in the winter you've decided to become a ski bum.",
"sum([abs(ski_heights[x[i]] - skier_heights[i]) for i in range(num_skiers)] )] model += [z == sum([abs(Element(ski_heights,x[i])",
"to skiers. Ideally, each skier should obtain a pair of skis whose height",
"the sum of the disparities. ... Illustrate your algorithm by explicitly filling out",
"cpmpy import * import cpmpy.solvers import numpy as np from cpmpy_hakank import *",
"= [1, 2, 5, 7, 13, 21] skier_heights = [3, 4, 7, 11,",
"to be the absolute value of the difference between the height of the",
"each skier should obtain a pair of skis whose height matches his or",
"def ski_assignment(): # data num_skis = 6 num_skiers = 5 ski_heights = [1,",
"# data num_skis = 6 num_skiers = 5 ski_heights = [1, 2, 5,",
"define the disparity between a skier and his or her skis to be",
"- skier_heights[i]) for i in range(num_skiers)] )] model += [z == sum([abs(Element(ski_heights,x[i]) -",
"written by <NAME> (<EMAIL>) See also my cpmpy page: http://hakank.org/cpmpy/ \"\"\" from cpmpy",
"model += [AllDifferent(x)] # model += [z == sum([abs(ski_heights[x[i]] - skier_heights[i]) for i",
"skis to be the absolute value of the difference between the height of",
"= [3, 4, 7, 11, 18] # which ski to choose for each",
"Ski heights: 1, 2, 5, 7, 13, 21. * Skier heights: 3, 4,",
"exactly. Unfortunately, this is generally not possible. We define the disparity between a",
"+= [AllDifferent(x)] # model += [z == sum([abs(ski_heights[x[i]] - skier_heights[i]) for i in",
"CPM_ortools(model) num_solutions = 0 if ss.solve(): num_solutions += 1 print(\"total differences:\", z.value()) for",
"You've hooked up with the Mount Baldy Ski Resort. They'll let you ski",
"absolute value of the difference between the height of the skier and the",
"for the following sample data: * Ski heights: 1, 2, 5, 7, 13,",
"Ski %i with length %2i (diff: %2i)' %\\ (i, x_val, ski_height, diff)) print()",
"hooked up with the Mount Baldy Ski Resort. They'll let you ski all",
"* def ski_assignment(): # data num_skis = 6 num_skiers = 5 ski_heights =",
"[z == sum([abs(Element(ski_heights,x[i]) - skier_heights[i]) for i in range(num_skiers)] )] ss = CPM_ortools(model)",
"in range(num_skiers)] )] ss = CPM_ortools(model) num_solutions = 0 if ss.solve(): num_solutions +=",
"ski to choose for each skier x = intvar(0,num_skis-1,shape=num_skiers,name=\"x\") z = intvar(0, sum(ski_heights),",
"= 6 num_skiers = 5 ski_heights = [1, 2, 5, 7, 13, 21]",
"num_solutions = 0 if ss.solve(): num_solutions += 1 print(\"total differences:\", z.value()) for i",
"an assignment of skis to skiers that minimizes the sum of the disparities.",
"of the skier and the pair of skis. Our objective is to find",
"algorithm to assign skis to skiers. Ideally, each skier should obtain a pair",
"for each skier x = intvar(0,num_skis-1,shape=num_skiers,name=\"x\") z = intvar(0, sum(ski_heights), name=\"z\") model =",
"* Skier heights: 3, 4, 7, 11, 18. ''' This cpmpy model was",
"\"\"\" from cpmpy import * import cpmpy.solvers import numpy as np from cpmpy_hakank",
"is to find an assignment of skis to skiers that minimizes the sum",
"print(\"total differences:\", z.value()) for i in range(num_skiers): x_val = x[i].value() ski_height = ski_heights[x[i].value()]",
"Optimization! Your job at Snapple is pleasant but in the winter you've decided",
"helping their ski rental shop with an algorithm to assign skis to skiers.",
"ski_height = ski_heights[x[i].value()] diff = ski_height - skier_heights[i] print('Skier %i: Ski %i with",
"data num_skis = 6 num_skiers = 5 ski_heights = [1, 2, 5, 7,",
"value of the difference between the height of the skier and the pair",
"1 print(\"total differences:\", z.value()) for i in range(num_skiers): x_val = x[i].value() ski_height =",
"the difference between the height of the skier and the pair of skis.",
"not possible. We define the disparity between a skier and his or her",
"Our objective is to find an assignment of skis to skiers that minimizes",
"skier and his or her skis to be the absolute value of the",
"the absolute value of the difference between the height of the skier and",
"was written by <NAME> (<EMAIL>) See also my cpmpy page: http://hakank.org/cpmpy/ \"\"\" from",
"ski_heights = [1, 2, 5, 7, 13, 21] skier_heights = [3, 4, 7,",
"the pair of skis. Our objective is to find an assignment of skis",
"5, 7, 13, 21] skier_heights = [3, 4, 7, 11, 18] # which",
"cpmpy page: http://hakank.org/cpmpy/ \"\"\" from cpmpy import * import cpmpy.solvers import numpy as",
"18. ''' This cpmpy model was written by <NAME> (<EMAIL>) See also my",
"Jr.: PIC 60, Fall 2008 Final Review, December 12, 2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf ''' 5.",
"<NAME>, Jr.: PIC 60, Fall 2008 Final Review, December 12, 2008 http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf '''",
"np from cpmpy_hakank import * def ski_assignment(): # data num_skis = 6 num_skiers",
"import numpy as np from cpmpy_hakank import * def ski_assignment(): # data num_skis",
"- skier_heights[i] print('Skier %i: Ski %i with length %2i (diff: %2i)' %\\ (i,",
"ski bum. You've hooked up with the Mount Baldy Ski Resort. They'll let",
"height of the skier and the pair of skis. Our objective is to",
"This cpmpy model was written by <NAME> (<EMAIL>) See also my cpmpy page:",
"you've decided to become a ski bum. You've hooked up with the Mount",
"7, 13, 21] skier_heights = [3, 4, 7, 11, 18] # which ski",
"= intvar(0, sum(ski_heights), name=\"z\") model = Model(minimize=z) # constraints model += [AllDifferent(x)] #",
"1, 2, 5, 7, 13, 21. * Skier heights: 3, 4, 7, 11,",
"''' 5. Ski Optimization! Your job at Snapple is pleasant but in the",
"Model(minimize=z) # constraints model += [AllDifferent(x)] # model += [z == sum([abs(ski_heights[x[i]] -",
"<NAME> (<EMAIL>) See also my cpmpy page: http://hakank.org/cpmpy/ \"\"\" from cpmpy import *",
"as np from cpmpy_hakank import * def ski_assignment(): # data num_skis = 6",
"height exactly. Unfortunately, this is generally not possible. We define the disparity between",
"We define the disparity between a skier and his or her skis to",
"of skis whose height matches his or her own height exactly. Unfortunately, this",
"skis. Our objective is to find an assignment of skis to skiers that",
"winter for free in exchange for helping their ski rental shop with an",
"you ski all winter for free in exchange for helping their ski rental",
"Snapple is pleasant but in the winter you've decided to become a ski",
"cpmpy_hakank import * def ski_assignment(): # data num_skis = 6 num_skiers = 5",
"3, 4, 7, 11, 18. ''' This cpmpy model was written by <NAME>",
"Ski Resort. They'll let you ski all winter for free in exchange for",
"or her own height exactly. Unfortunately, this is generally not possible. We define",
"pair of skis. Our objective is to find an assignment of skis to",
"possible. We define the disparity between a skier and his or her skis",
"with the Mount Baldy Ski Resort. They'll let you ski all winter for",
"- skier_heights[i]) for i in range(num_skiers)] )] ss = CPM_ortools(model) num_solutions = 0",
"ski all winter for free in exchange for helping their ski rental shop",
"my cpmpy page: http://hakank.org/cpmpy/ \"\"\" from cpmpy import * import cpmpy.solvers import numpy",
"model += [z == sum([abs(ski_heights[x[i]] - skier_heights[i]) for i in range(num_skiers)] )] model",
"5. Ski Optimization! Your job at Snapple is pleasant but in the winter",
"= intvar(0,num_skis-1,shape=num_skiers,name=\"x\") z = intvar(0, sum(ski_heights), name=\"z\") model = Model(minimize=z) # constraints model",
"between the height of the skier and the pair of skis. Our objective",
"each skier x = intvar(0,num_skis-1,shape=num_skiers,name=\"x\") z = intvar(0, sum(ski_heights), name=\"z\") model = Model(minimize=z)",
"= CPM_ortools(model) num_solutions = 0 if ss.solve(): num_solutions += 1 print(\"total differences:\", z.value())",
"sample data: * Ski heights: 1, 2, 5, 7, 13, 21. * Skier",
"i in range(num_skiers)] )] ss = CPM_ortools(model) num_solutions = 0 if ss.solve(): num_solutions",
"his or her own height exactly. Unfortunately, this is generally not possible. We",
"num_skiers = 5 ski_heights = [1, 2, 5, 7, 13, 21] skier_heights =",
"numpy as np from cpmpy_hakank import * def ski_assignment(): # data num_skis =",
"but in the winter you've decided to become a ski bum. You've hooked",
"http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf ''' 5. Ski Optimization! Your job at Snapple is pleasant but in",
"of the disparities. ... Illustrate your algorithm by explicitly filling out the A[i,",
"whose height matches his or her own height exactly. Unfortunately, this is generally",
"See also my cpmpy page: http://hakank.org/cpmpy/ \"\"\" from cpmpy import * import cpmpy.solvers",
"* Ski heights: 1, 2, 5, 7, 13, 21. * Skier heights: 3,",
"x_val = x[i].value() ski_height = ski_heights[x[i].value()] diff = ski_height - skier_heights[i] print('Skier %i:",
"for i in range(num_skiers): x_val = x[i].value() ski_height = ski_heights[x[i].value()] diff = ski_height",
"find an assignment of skis to skiers that minimizes the sum of the",
"21] skier_heights = [3, 4, 7, 11, 18] # which ski to choose",
"range(num_skiers)] )] model += [z == sum([abs(Element(ski_heights,x[i]) - skier_heights[i]) for i in range(num_skiers)]",
"* import cpmpy.solvers import numpy as np from cpmpy_hakank import * def ski_assignment():",
"# constraints model += [AllDifferent(x)] # model += [z == sum([abs(ski_heights[x[i]] - skier_heights[i])",
"11, 18. ''' This cpmpy model was written by <NAME> (<EMAIL>) See also",
"%2i)' %\\ (i, x_val, ski_height, diff)) print() print() print('num_solutions:', num_solutions) return ss ss",
"13, 21. * Skier heights: 3, 4, 7, 11, 18. ''' This cpmpy",
"i in range(num_skiers)] )] model += [z == sum([abs(Element(ski_heights,x[i]) - skier_heights[i]) for i",
"z.value()) for i in range(num_skiers): x_val = x[i].value() ski_height = ski_heights[x[i].value()] diff =",
"import cpmpy.solvers import numpy as np from cpmpy_hakank import * def ski_assignment(): #",
"winter you've decided to become a ski bum. You've hooked up with the",
"== sum([abs(Element(ski_heights,x[i]) - skier_heights[i]) for i in range(num_skiers)] )] ss = CPM_ortools(model) num_solutions",
"ski_height - skier_heights[i] print('Skier %i: Ski %i with length %2i (diff: %2i)' %\\",
"7, 11, 18. ''' This cpmpy model was written by <NAME> (<EMAIL>) See",
"(<EMAIL>) See also my cpmpy page: http://hakank.org/cpmpy/ \"\"\" from cpmpy import * import",
"Ski Optimization! Your job at Snapple is pleasant but in the winter you've",
"5 ski_heights = [1, 2, 5, 7, 13, 21] skier_heights = [3, 4,",
"in exchange for helping their ski rental shop with an algorithm to assign",
"x = intvar(0,num_skis-1,shape=num_skiers,name=\"x\") z = intvar(0, sum(ski_heights), name=\"z\") model = Model(minimize=z) # constraints",
"13, 21] skier_heights = [3, 4, 7, 11, 18] # which ski to",
"constraints model += [AllDifferent(x)] # model += [z == sum([abs(ski_heights[x[i]] - skier_heights[i]) for",
"in range(num_skiers): x_val = x[i].value() ski_height = ski_heights[x[i].value()] diff = ski_height - skier_heights[i]",
"objective is to find an assignment of skis to skiers that minimizes the",
"the height of the skier and the pair of skis. Our objective is",
"name=\"z\") model = Model(minimize=z) # constraints model += [AllDifferent(x)] # model += [z",
"ski_heights[x[i].value()] diff = ski_height - skier_heights[i] print('Skier %i: Ski %i with length %2i"
] |
[
"self.regexed_items[x][0] if self.regexed_items[x][1]: txt = short_old + ' => ' + short_new else:",
"2019 # GitHub: KasumiL5x import re import PySide2.QtCore as QC import PySide2.QtGui as",
"x[0] try: subbed_name = re.sub(pattern, subs, x[0]) subbed_name = mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name)) # make it",
"non-matching elements result = [x for idx, x in enumerate(result) if idx not",
"#end class MaximumReplacer(QW.QDialog): def __init__(self, parent=get_maya_window()): QW.QDialog.__init__(self, parent=parent) # [(short_name, long_name), ...] self.selected_items",
"gb_selection = QW.QGroupBox() gb_selection.setLayout(QW.QHBoxLayout()) gb_selection.layout().setContentsMargins(2,2,2,2) gb_selection.layout().setSpacing(5) gb_selection.setTitle('Selection') self.layout().addWidget(gb_selection) # self.rb_select_all = QW.QRadioButton() self.rb_select_all.setText('All')",
"self.txt_filter_name = QW.QLineEdit() self.txt_filter_name.setPlaceholderText('Pattern...') gb_selection.layout().addWidget(self.txt_filter_name) # self.txt_filter_type = QW.QLineEdit() self.txt_filter_type.setPlaceholderText('Type (e.g. transform)...') gb_selection.layout().addWidget(self.txt_filter_type)",
"lbl_regex.setText('Pattern') expr_widget.layout().addWidget(lbl_regex) # self.txt_replace_expr = QW.QLineEdit() self.txt_replace_expr.setPlaceholderText('Regex...') expr_widget.layout().addWidget(self.txt_replace_expr) # subs_widget = QW.QWidget() subs_widget.setLayout(QW.QHBoxLayout())",
"if len(pattern): to_remove = [] for idx in range(len(result)): try: if None ==",
"rename of ' + str(number_different) + ' objects?' dialog_result = mc.confirmDialog(title='Maximum Replacer', message=dialog_msg,",
"g_maximum_replacer_inst.deleteLater() except: pass g_maximum_replacer_inst = MaximumReplacer() g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose) g_maximum_replacer_inst.show() #end # uncomment this to",
"all objects if self.rb_select_all.isChecked(): result = zip(self.get_real_short_names(False), mc.ls(long=True)) # selected objects if self.rb_select_sel.isChecked():",
"= self.txt_filter_name.text() if len(pattern): to_remove = [] for idx in range(len(result)): try: if",
"__init__(self, parent=get_maya_window()): QW.QDialog.__init__(self, parent=parent) # [(short_name, long_name), ...] self.selected_items = [] # [(regexed_short_name,",
"subbed_name = x[0] try: subbed_name = re.sub(pattern, subs, x[0]) subbed_name = mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name)) #",
"# safety check if None == self.selected_items or None == self.regexed_items: return #",
"self.txt_replace_expr.editingFinished.connect(self.on_text_edited) self.txt_replace_subs.editingFinished.connect(self.on_text_edited) self.rb_select_all.clicked.connect(self.update) self.rb_select_sel.clicked.connect(self.update) self.btn_commit.clicked.connect(self.commit) # initial self.update() #end # called when any",
"= QW.QLabel() lbl_subst.setText('Substitute') subs_widget.layout().addWidget(lbl_subst) # self.txt_replace_subs = QW.QLineEdit() self.txt_replace_subs.setPlaceholderText('Substitute...') subs_widget.layout().addWidget(self.txt_replace_subs) # Preview Section",
"try: subbed_name = re.sub(pattern, subs, x[0]) subbed_name = mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name)) # make it maya-valid",
"self.setMinimumWidth(380) self.setMinimumHeight(400) self.setLayout(QW.QVBoxLayout()) self.layout().setContentsMargins(5, 5, 5, 5) self.layout().setSpacing(5) self.layout().setAlignment(QC.Qt.AlignTop) # Selection Section #",
"zip(self.get_real_short_names(True), mc.ls(sl=True, long=True)) # filter by type filter_type = self.txt_filter_type.text() if len(filter_type): to_remove",
"message=dialog_msg, button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No') if 'No' == dialog_result: return # undo",
"3. update list view with a preview of changes bold_font = QG.QFont('', -1,",
"# lbl_regex = QW.QLabel() lbl_regex.setText('Pattern') expr_widget.layout().addWidget(lbl_regex) # self.txt_replace_expr = QW.QLineEdit() self.txt_replace_expr.setPlaceholderText('Regex...') expr_widget.layout().addWidget(self.txt_replace_expr) #",
"parent #end class MaximumReplacer(QW.QDialog): def __init__(self, parent=get_maya_window()): QW.QDialog.__init__(self, parent=parent) # [(short_name, long_name), ...]",
"# update while typing checkbox self.chk_update_while_typing = QW.QCheckBox() self.chk_update_while_typing.setText('Update while typing') self.chk_update_while_typing.setChecked(True) footer_widget.layout().addWidget(self.chk_update_while_typing,",
"QW.QRadioButton() self.rb_select_all.setText('All') self.rb_select_all.setChecked(True) gb_selection.layout().addWidget(self.rb_select_all) # self.rb_select_sel = QW.QRadioButton() self.rb_select_sel.setText('Selected') gb_selection.layout().addWidget(self.rb_select_sel) # self.txt_filter_name =",
"PySide2.QtWidgets as QW import shiboken2 import maya.cmds as mc import maya.mel as mel",
"self.chk_update_while_typing.setChecked(True) footer_widget.layout().addWidget(self.chk_update_while_typing, 0, QC.Qt.AlignRight) # connections self.txt_filter_name.textChanged.connect(self.on_text_changed) self.txt_filter_type.textChanged.connect(self.on_text_changed) self.txt_replace_expr.textChanged.connect(self.on_text_changed) self.txt_replace_subs.textChanged.connect(self.on_text_changed) self.txt_filter_name.editingFinished.connect(self.on_text_edited) self.txt_filter_type.editingFinished.connect(self.on_text_edited) self.txt_replace_expr.editingFinished.connect(self.on_text_edited)",
"mc.ls(long=True)) # selected objects if self.rb_select_sel.isChecked(): result = zip(self.get_real_short_names(True), mc.ls(sl=True, long=True)) # filter",
"omui def get_maya_window(): ptr = omui.MQtUtil.mainWindow() parent = shiboken2.wrapInstance(long(ptr), QW.QDialog) return parent #end",
"to_remove = [] for idx in range(len(result)): node_type = mc.nodeType(result[idx][1]) try: if None",
"info_lbl.setOpenExternalLinks(True) info_lbl.setText('Maximum Replacer v1.2 <a href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>') footer_widget.layout().addWidget(info_lbl, 0, QC.Qt.AlignLeft) # update while typing",
"self.txt_replace_expr = QW.QLineEdit() self.txt_replace_expr.setPlaceholderText('Regex...') expr_widget.layout().addWidget(self.txt_replace_expr) # subs_widget = QW.QWidget() subs_widget.setLayout(QW.QHBoxLayout()) subs_widget.layout().setContentsMargins(2,2,2,2) subs_widget.layout().setSpacing(5) gb_expression.layout().addWidget(subs_widget)",
"self.rb_select_sel.clicked.connect(self.update) self.btn_commit.clicked.connect(self.commit) # initial self.update() #end # called when any text changes in",
"False)) # failed so just pass through data and make it not changed",
"# confirm dialog number_different = len([x for x in self.regexed_items if x[1]]) dialog_msg",
"self.txt_replace_expr.setPlaceholderText('Regex...') expr_widget.layout().addWidget(self.txt_replace_expr) # subs_widget = QW.QWidget() subs_widget.setLayout(QW.QHBoxLayout()) subs_widget.layout().setContentsMargins(2,2,2,2) subs_widget.layout().setSpacing(5) gb_expression.layout().addWidget(subs_widget) # lbl_subst =",
"pass g_maximum_replacer_inst = MaximumReplacer() g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose) g_maximum_replacer_inst.show() #end # uncomment this to run directly",
"result = [] # all objects if self.rb_select_all.isChecked(): result = zip(self.get_real_short_names(False), mc.ls(long=True)) #",
"zip(self.get_real_short_names(False), mc.ls(long=True)) # selected objects if self.rb_select_sel.isChecked(): result = zip(self.get_real_short_names(True), mc.ls(sl=True, long=True)) #",
"update list view with a preview of changes bold_font = QG.QFont('', -1, QG.QFont.Bold,",
"QG import PySide2.QtWidgets as QW import shiboken2 import maya.cmds as mc import maya.mel",
"gb_selection.setLayout(QW.QHBoxLayout()) gb_selection.layout().setContentsMargins(2,2,2,2) gb_selection.layout().setSpacing(5) gb_selection.setTitle('Selection') self.layout().addWidget(gb_selection) # self.rb_select_all = QW.QRadioButton() self.rb_select_all.setText('All') self.rb_select_all.setChecked(True) gb_selection.layout().addWidget(self.rb_select_all) #",
"footer_widget.layout().setSpacing(5) self.layout().addWidget(footer_widget) # copyright! info_lbl = QW.QLabel() info_lbl.setTextFormat(QC.Qt.RichText) info_lbl.setOpenExternalLinks(True) info_lbl.setText('Maximum Replacer v1.2 <a",
"def get_real_short_names(self, selected): result = [] for x in mc.ls(sl=selected, shortNames=True): result.append(x[x.rfind('|')+1:]) #",
"don't need changing if not self.regexed_items[x][1]: continue old_name = self.selected_items[x][1] # old LONG",
"result #end def update(self): # 1. get the selection self.selected_items = self.get_selection() #",
"result.append(x[x.rfind('|')+1:]) # basically strip all after last | (the |+1 becomes 0 if",
"return result #end def update(self): # 1. get the selection self.selected_items = self.get_selection()",
"self.txt_replace_expr.textChanged.connect(self.on_text_changed) self.txt_replace_subs.textChanged.connect(self.on_text_changed) self.txt_filter_name.editingFinished.connect(self.on_text_edited) self.txt_filter_type.editingFinished.connect(self.on_text_edited) self.txt_replace_expr.editingFinished.connect(self.on_text_edited) self.txt_replace_subs.editingFinished.connect(self.on_text_edited) self.rb_select_all.clicked.connect(self.update) self.rb_select_sel.clicked.connect(self.update) self.btn_commit.clicked.connect(self.commit) # initial self.update() #end",
"any text changes in text fields def on_text_changed(self): if not self.chk_update_while_typing.isChecked(): return self.update()",
"range(len(result)): try: if None == re.search(pattern, result[idx][0]): to_remove.append(idx) except: continue #end for #",
"import PySide2.QtCore as QC import PySide2.QtGui as QG import PySide2.QtWidgets as QW import",
"remove all non-matching elements result = [x for idx, x in enumerate(result) if",
"(in REVERSE order as to not break the hierarchy) for x in reversed(range(len(self.selected_items))):",
"names mc.undoInfo(openChunk=True, chunkName='MaximumReplacer') # rename all objects (in REVERSE order as to not",
"becomes 0 if the find fails, so it's okay to fail) return result",
"== self.selected_items or None == self.regexed_items: return # confirm dialog number_different = len([x",
"self.setLayout(QW.QVBoxLayout()) self.layout().setContentsMargins(5, 5, 5, 5) self.layout().setSpacing(5) self.layout().setAlignment(QC.Qt.AlignTop) # Selection Section # gb_selection =",
"subbed_name = re.sub(pattern, subs, x[0]) subbed_name = mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name)) # make it maya-valid result.append((subbed_name,",
"# initial self.update() #end # called when any text changes in text fields",
"not changed return result #end def update(self): # 1. get the selection self.selected_items",
"in to_remove] #end # filter by expression pattern = self.txt_filter_name.text() if len(pattern): to_remove",
"def get_selection(self, regex=None): result = [] # all objects if self.rb_select_all.isChecked(): result =",
"= short_old self.lv_preview.addItem(txt) if self.regexed_items[x][1]: self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font) #end def commit(self): # safety check if",
"commit(self): # safety check if None == self.selected_items or None == self.regexed_items: return",
"for x in self.regexed_items if x[1]]) dialog_msg = 'Confirm rename of ' +",
"self.txt_replace_subs.editingFinished.connect(self.on_text_edited) self.rb_select_all.clicked.connect(self.update) self.rb_select_sel.clicked.connect(self.update) self.btn_commit.clicked.connect(self.commit) # initial self.update() #end # called when any text",
"self.selected_items[x][1] # old LONG name new_name = self.regexed_items[x][0] # new SHORT name try:",
"subs, x[0]) subbed_name = mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name)) # make it maya-valid result.append((subbed_name, subbed_name != x[0]))",
"[] self.setWindowFlags(QC.Qt.Window) self.setWindowTitle('Maximum Replacer') self.setMinimumWidth(380) self.setMinimumHeight(400) self.setLayout(QW.QVBoxLayout()) self.layout().setContentsMargins(5, 5, 5, 5) self.layout().setSpacing(5) self.layout().setAlignment(QC.Qt.AlignTop)",
"number_different = len([x for x in self.regexed_items if x[1]]) dialog_msg = 'Confirm rename",
"mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name)) # make it maya-valid result.append((subbed_name, subbed_name != x[0])) # (regex name, changed",
"create(): global g_maximum_replacer_inst try: g_maximum_replacer_inst.close() g_maximum_replacer_inst.deleteLater() except: pass g_maximum_replacer_inst = MaximumReplacer() g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose) g_maximum_replacer_inst.show()",
"this to run directly from the script editor (or call it from a",
"QC.Qt.AlignRight) # connections self.txt_filter_name.textChanged.connect(self.on_text_changed) self.txt_filter_type.textChanged.connect(self.on_text_changed) self.txt_replace_expr.textChanged.connect(self.on_text_changed) self.txt_replace_subs.textChanged.connect(self.on_text_changed) self.txt_filter_name.editingFinished.connect(self.on_text_edited) self.txt_filter_type.editingFinished.connect(self.on_text_edited) self.txt_replace_expr.editingFinished.connect(self.on_text_edited) self.txt_replace_subs.editingFinished.connect(self.on_text_edited) self.rb_select_all.clicked.connect(self.update) self.rb_select_sel.clicked.connect(self.update)",
"self.txt_filter_type.text() if len(filter_type): to_remove = [] for idx in range(len(result)): node_type = mc.nodeType(result[idx][1])",
"shortNames=True): result.append(x[x.rfind('|')+1:]) # basically strip all after last | (the |+1 becomes 0",
"just pass through data and make it not changed return result #end def",
"to rename %s: %s' % (old_name, e) # end chunk! mc.undoInfo(closeChunk=True) # refresh",
"after last | (the |+1 becomes 0 if the find fails, so it's",
"result = zip(self.get_real_short_names(False), mc.ls(long=True)) # selected objects if self.rb_select_sel.isChecked(): result = zip(self.get_real_short_names(True), mc.ls(sl=True,",
"#end # called when any text changes in text fields def on_text_changed(self): if",
"self.txt_replace_expr.text() subs = self.txt_replace_subs.text() result = [] for x in self.selected_items: subbed_name =",
"<a href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>') footer_widget.layout().addWidget(info_lbl, 0, QC.Qt.AlignLeft) # update while typing checkbox self.chk_update_while_typing = QW.QCheckBox()",
"QW.QWidget() subs_widget.setLayout(QW.QHBoxLayout()) subs_widget.layout().setContentsMargins(2,2,2,2) subs_widget.layout().setSpacing(5) gb_expression.layout().addWidget(subs_widget) # lbl_subst = QW.QLabel() lbl_subst.setText('Substitute') subs_widget.layout().addWidget(lbl_subst) # self.txt_replace_subs",
"text changes in text fields def on_text_changed(self): if not self.chk_update_while_typing.isChecked(): return self.update() #end",
"= mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name)) # make it maya-valid result.append((subbed_name, subbed_name != x[0])) # (regex name,",
"= zip(self.get_real_short_names(True), mc.ls(sl=True, long=True)) # filter by type filter_type = self.txt_filter_type.text() if len(filter_type):",
"in to_remove] #end return result #end def calculate_regexed_names(self): pattern = self.txt_replace_expr.text() subs =",
"idx in range(len(result)): try: if None == re.search(pattern, result[idx][0]): to_remove.append(idx) except: continue #end",
"= QW.QGroupBox() gb_expression.setLayout(QW.QVBoxLayout()) gb_expression.layout().setContentsMargins(2,2,2,2) gb_expression.layout().setSpacing(5) gb_expression.setTitle('Regular Expression') self.layout().addWidget(gb_expression) # expr_widget = QW.QWidget() expr_widget.setLayout(QW.QHBoxLayout())",
"= QW.QLineEdit() self.txt_replace_expr.setPlaceholderText('Regex...') expr_widget.layout().addWidget(self.txt_replace_expr) # subs_widget = QW.QWidget() subs_widget.setLayout(QW.QHBoxLayout()) subs_widget.layout().setContentsMargins(2,2,2,2) subs_widget.layout().setSpacing(5) gb_expression.layout().addWidget(subs_widget) #",
"okay to fail) return result #end def get_selection(self, regex=None): result = [] #",
"# gb_expression = QW.QGroupBox() gb_expression.setLayout(QW.QVBoxLayout()) gb_expression.layout().setContentsMargins(2,2,2,2) gb_expression.layout().setSpacing(5) gb_expression.setTitle('Regular Expression') self.layout().addWidget(gb_expression) # expr_widget =",
"self.chk_update_while_typing.setText('Update while typing') self.chk_update_while_typing.setChecked(True) footer_widget.layout().addWidget(self.chk_update_while_typing, 0, QC.Qt.AlignRight) # connections self.txt_filter_name.textChanged.connect(self.on_text_changed) self.txt_filter_type.textChanged.connect(self.on_text_changed) self.txt_replace_expr.textChanged.connect(self.on_text_changed) self.txt_replace_subs.textChanged.connect(self.on_text_changed)",
"(e.g. return pressed) def on_text_edited(self): if self.chk_update_while_typing.isChecked(): return self.update() #end def edit_done(self): print",
"1. get the selection self.selected_items = self.get_selection() # 2. get the regex'd versions",
"# Selection Section # gb_selection = QW.QGroupBox() gb_selection.setLayout(QW.QHBoxLayout()) gb_selection.layout().setContentsMargins(2,2,2,2) gb_selection.layout().setSpacing(5) gb_selection.setTitle('Selection') self.layout().addWidget(gb_selection) #",
"mc import maya.mel as mel import maya.OpenMayaUI as omui def get_maya_window(): ptr =",
"def update(self): # 1. get the selection self.selected_items = self.get_selection() # 2. get",
"footer_widget.layout().addWidget(self.chk_update_while_typing, 0, QC.Qt.AlignRight) # connections self.txt_filter_name.textChanged.connect(self.on_text_changed) self.txt_filter_type.textChanged.connect(self.on_text_changed) self.txt_replace_expr.textChanged.connect(self.on_text_changed) self.txt_replace_subs.textChanged.connect(self.on_text_changed) self.txt_filter_name.editingFinished.connect(self.on_text_edited) self.txt_filter_type.editingFinished.connect(self.on_text_edited) self.txt_replace_expr.editingFinished.connect(self.on_text_edited) self.txt_replace_subs.editingFinished.connect(self.on_text_edited)",
"QW.QDialog.__init__(self, parent=parent) # [(short_name, long_name), ...] self.selected_items = [] # [(regexed_short_name, different_from_original), ...]",
"self.selected_items = self.get_selection() # 2. get the regex'd versions self.regexed_items = self.calculate_regexed_names() #",
"selection self.selected_items = self.get_selection() # 2. get the regex'd versions self.regexed_items = self.calculate_regexed_names()",
"edit_done(self): print 'Editing done' #end def get_real_short_names(self, selected): result = [] for x",
"= self.regexed_items[x][0] if self.regexed_items[x][1]: txt = short_old + ' => ' + short_new",
"self.txt_replace_subs.setPlaceholderText('Substitute...') subs_widget.layout().addWidget(self.txt_replace_subs) # Preview Section # gb_preview = QW.QGroupBox() gb_preview.setLayout(QW.QVBoxLayout()) gb_preview.layout().setContentsMargins(2,2,2,2) gb_preview.layout().setSpacing(5) gb_preview.setTitle('Preview')",
"as QW import shiboken2 import maya.cmds as mc import maya.mel as mel import",
"ptr = omui.MQtUtil.mainWindow() parent = shiboken2.wrapInstance(long(ptr), QW.QDialog) return parent #end class MaximumReplacer(QW.QDialog): def",
"type filter_type = self.txt_filter_type.text() if len(filter_type): to_remove = [] for idx in range(len(result)):",
"None == self.regexed_items: return # confirm dialog number_different = len([x for x in",
"gb_preview.setTitle('Preview') self.layout().addWidget(gb_preview) # self.lv_preview = QW.QListWidget() gb_preview.layout().addWidget(self.lv_preview) # Button! self.btn_commit = QW.QPushButton() self.btn_commit.setText('Commit')",
"return # undo chunk for all names mc.undoInfo(openChunk=True, chunkName='MaximumReplacer') # rename all objects",
"gb_preview.layout().setSpacing(5) gb_preview.setTitle('Preview') self.layout().addWidget(gb_preview) # self.lv_preview = QW.QListWidget() gb_preview.layout().addWidget(self.lv_preview) # Button! self.btn_commit = QW.QPushButton()",
"= QW.QListWidget() gb_preview.layout().addWidget(self.lv_preview) # Button! self.btn_commit = QW.QPushButton() self.btn_commit.setText('Commit') self.layout().addWidget(self.btn_commit) # footer footer_widget",
"of changes bold_font = QG.QFont('', -1, QG.QFont.Bold, False) self.lv_preview.clear() for x in range(len(self.selected_items)):",
"import PySide2.QtGui as QG import PySide2.QtWidgets as QW import shiboken2 import maya.cmds as",
"parent=parent) # [(short_name, long_name), ...] self.selected_items = [] # [(regexed_short_name, different_from_original), ...] maps",
"def edit_done(self): print 'Editing done' #end def get_real_short_names(self, selected): result = [] for",
"while typing checkbox self.chk_update_while_typing = QW.QCheckBox() self.chk_update_while_typing.setText('Update while typing') self.chk_update_while_typing.setChecked(True) footer_widget.layout().addWidget(self.chk_update_while_typing, 0, QC.Qt.AlignRight)",
"as QG import PySide2.QtWidgets as QW import shiboken2 import maya.cmds as mc import",
"QW.QLineEdit() self.txt_replace_expr.setPlaceholderText('Regex...') expr_widget.layout().addWidget(self.txt_replace_expr) # subs_widget = QW.QWidget() subs_widget.setLayout(QW.QHBoxLayout()) subs_widget.layout().setContentsMargins(2,2,2,2) subs_widget.layout().setSpacing(5) gb_expression.layout().addWidget(subs_widget) # lbl_subst",
"try: if None == re.search(pattern, result[idx][0]): to_remove.append(idx) except: continue #end for # remove",
"= zip(self.get_real_short_names(False), mc.ls(long=True)) # selected objects if self.rb_select_sel.isChecked(): result = zip(self.get_real_short_names(True), mc.ls(sl=True, long=True))",
"failed so just pass through data and make it not changed return result",
"Section # gb_selection = QW.QGroupBox() gb_selection.setLayout(QW.QHBoxLayout()) gb_selection.layout().setContentsMargins(2,2,2,2) gb_selection.layout().setSpacing(5) gb_selection.setTitle('Selection') self.layout().addWidget(gb_selection) # self.rb_select_all =",
"while typing') self.chk_update_while_typing.setChecked(True) footer_widget.layout().addWidget(self.chk_update_while_typing, 0, QC.Qt.AlignRight) # connections self.txt_filter_name.textChanged.connect(self.on_text_changed) self.txt_filter_type.textChanged.connect(self.on_text_changed) self.txt_replace_expr.textChanged.connect(self.on_text_changed) self.txt_replace_subs.textChanged.connect(self.on_text_changed) self.txt_filter_name.editingFinished.connect(self.on_text_edited)",
"x in self.regexed_items if x[1]]) dialog_msg = 'Confirm rename of ' + str(number_different)",
"href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>') footer_widget.layout().addWidget(info_lbl, 0, QC.Qt.AlignLeft) # update while typing checkbox self.chk_update_while_typing = QW.QCheckBox() self.chk_update_while_typing.setText('Update",
"# new SHORT name try: mc.rename(old_name, new_name) except Exception as e: print 'Failed",
"regex=None): result = [] # all objects if self.rb_select_all.isChecked(): result = zip(self.get_real_short_names(False), mc.ls(long=True))",
"= x[0] try: subbed_name = re.sub(pattern, subs, x[0]) subbed_name = mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name)) # make",
"self.lv_preview = QW.QListWidget() gb_preview.layout().addWidget(self.lv_preview) # Button! self.btn_commit = QW.QPushButton() self.btn_commit.setText('Commit') self.layout().addWidget(self.btn_commit) # footer",
"QW.QListWidget() gb_preview.layout().addWidget(self.lv_preview) # Button! self.btn_commit = QW.QPushButton() self.btn_commit.setText('Commit') self.layout().addWidget(self.btn_commit) # footer footer_widget =",
"self.txt_replace_subs = QW.QLineEdit() self.txt_replace_subs.setPlaceholderText('Substitute...') subs_widget.layout().addWidget(self.txt_replace_subs) # Preview Section # gb_preview = QW.QGroupBox() gb_preview.setLayout(QW.QVBoxLayout())",
"when any text changes in text fields def on_text_changed(self): if not self.chk_update_while_typing.isChecked(): return",
"short_new else: txt = short_old self.lv_preview.addItem(txt) if self.regexed_items[x][1]: self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font) #end def commit(self): #",
"print 'Failed to rename %s: %s' % (old_name, e) # end chunk! mc.undoInfo(closeChunk=True)",
"'Confirm rename of ' + str(number_different) + ' objects?' dialog_result = mc.confirmDialog(title='Maximum Replacer',",
"break the hierarchy) for x in reversed(range(len(self.selected_items))): # ignore nodes that don't need",
"uncomment this to run directly from the script editor (or call it from",
"re.sub(pattern, subs, x[0]) subbed_name = mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name)) # make it maya-valid result.append((subbed_name, subbed_name !=",
"[] # all objects if self.rb_select_all.isChecked(): result = zip(self.get_real_short_names(False), mc.ls(long=True)) # selected objects",
"new_name = self.regexed_items[x][0] # new SHORT name try: mc.rename(old_name, new_name) except Exception as",
"subs_widget.layout().setContentsMargins(2,2,2,2) subs_widget.layout().setSpacing(5) gb_expression.layout().addWidget(subs_widget) # lbl_subst = QW.QLabel() lbl_subst.setText('Substitute') subs_widget.layout().addWidget(lbl_subst) # self.txt_replace_subs = QW.QLineEdit()",
"PySide2.QtGui as QG import PySide2.QtWidgets as QW import shiboken2 import maya.cmds as mc",
"lbl_subst = QW.QLabel() lbl_subst.setText('Substitute') subs_widget.layout().addWidget(lbl_subst) # self.txt_replace_subs = QW.QLineEdit() self.txt_replace_subs.setPlaceholderText('Substitute...') subs_widget.layout().addWidget(self.txt_replace_subs) # Preview",
"= self.selected_items[x][0] short_new = self.regexed_items[x][0] if self.regexed_items[x][1]: txt = short_old + ' =>",
"QG.QFont.Bold, False) self.lv_preview.clear() for x in range(len(self.selected_items)): short_old = self.selected_items[x][0] short_new = self.regexed_items[x][0]",
"expr_widget.layout().setSpacing(5) gb_expression.layout().addWidget(expr_widget) # lbl_regex = QW.QLabel() lbl_regex.setText('Pattern') expr_widget.layout().addWidget(lbl_regex) # self.txt_replace_expr = QW.QLineEdit() self.txt_replace_expr.setPlaceholderText('Regex...')",
"= QW.QLabel() lbl_regex.setText('Pattern') expr_widget.layout().addWidget(lbl_regex) # self.txt_replace_expr = QW.QLineEdit() self.txt_replace_expr.setPlaceholderText('Regex...') expr_widget.layout().addWidget(self.txt_replace_expr) # subs_widget =",
"original) except: result.append((subbed_name, False)) # failed so just pass through data and make",
"def __init__(self, parent=get_maya_window()): QW.QDialog.__init__(self, parent=parent) # [(short_name, long_name), ...] self.selected_items = [] #",
"# Expression Section # gb_expression = QW.QGroupBox() gb_expression.setLayout(QW.QVBoxLayout()) gb_expression.layout().setContentsMargins(2,2,2,2) gb_expression.layout().setSpacing(5) gb_expression.setTitle('Regular Expression') self.layout().addWidget(gb_expression)",
"self.chk_update_while_typing.isChecked(): return self.update() #end # called when changes have been committed in text",
"LONG name new_name = self.regexed_items[x][0] # new SHORT name try: mc.rename(old_name, new_name) except",
"gb_expression.setLayout(QW.QVBoxLayout()) gb_expression.layout().setContentsMargins(2,2,2,2) gb_expression.layout().setSpacing(5) gb_expression.setTitle('Regular Expression') self.layout().addWidget(gb_expression) # expr_widget = QW.QWidget() expr_widget.setLayout(QW.QHBoxLayout()) expr_widget.layout().setContentsMargins(2,2,2,2) expr_widget.layout().setSpacing(5)",
"if self.regexed_items[x][1]: self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font) #end def commit(self): # safety check if None == self.selected_items",
"different_from_original), ...] maps 1-1 with the above in size self.regexed_items = [] self.setWindowFlags(QC.Qt.Window)",
"so it's okay to fail) return result #end def get_selection(self, regex=None): result =",
"# called when any text changes in text fields def on_text_changed(self): if not",
"except: pass g_maximum_replacer_inst = MaximumReplacer() g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose) g_maximum_replacer_inst.show() #end # uncomment this to run",
"# subs_widget = QW.QWidget() subs_widget.setLayout(QW.QHBoxLayout()) subs_widget.layout().setContentsMargins(2,2,2,2) subs_widget.layout().setSpacing(5) gb_expression.layout().addWidget(subs_widget) # lbl_subst = QW.QLabel() lbl_subst.setText('Substitute')",
"return self.update() #end # called when changes have been committed in text fields",
"== dialog_result: return # undo chunk for all names mc.undoInfo(openChunk=True, chunkName='MaximumReplacer') # rename",
"result[idx][0]): to_remove.append(idx) except: continue #end for # remove all non-matching elements result =",
"maps 1-1 with the above in size self.regexed_items = [] self.setWindowFlags(QC.Qt.Window) self.setWindowTitle('Maximum Replacer')",
"None == re.search(filter_type, node_type): to_remove.append(idx) except: continue #end for # remove all non-matching",
"pattern = self.txt_replace_expr.text() subs = self.txt_replace_subs.text() result = [] for x in self.selected_items:",
"mc.rename(old_name, new_name) except Exception as e: print 'Failed to rename %s: %s' %",
"versions self.regexed_items = self.calculate_regexed_names() # 3. update list view with a preview of",
"expr_widget.layout().addWidget(self.txt_replace_expr) # subs_widget = QW.QWidget() subs_widget.setLayout(QW.QHBoxLayout()) subs_widget.layout().setContentsMargins(2,2,2,2) subs_widget.layout().setSpacing(5) gb_expression.layout().addWidget(subs_widget) # lbl_subst = QW.QLabel()",
"continue #end for # remove all non-matching elements result = [x for idx,",
"to run directly from the script editor (or call it from a shelf)",
"<NAME>, 2019 # GitHub: KasumiL5x import re import PySide2.QtCore as QC import PySide2.QtGui",
"# [(short_name, long_name), ...] self.selected_items = [] # [(regexed_short_name, different_from_original), ...] maps 1-1",
"get the regex'd versions self.regexed_items = self.calculate_regexed_names() # 3. update list view with",
"# failed so just pass through data and make it not changed return",
"find fails, so it's okay to fail) return result #end def get_selection(self, regex=None):",
"[(regexed_short_name, different_from_original), ...] maps 1-1 with the above in size self.regexed_items = []",
"self.get_selection() # 2. get the regex'd versions self.regexed_items = self.calculate_regexed_names() # 3. update",
"...] maps 1-1 with the above in size self.regexed_items = [] self.setWindowFlags(QC.Qt.Window) self.setWindowTitle('Maximum",
"as mel import maya.OpenMayaUI as omui def get_maya_window(): ptr = omui.MQtUtil.mainWindow() parent =",
"footer_widget.setLayout(QW.QHBoxLayout()) footer_widget.layout().setContentsMargins(0,0,0,0) footer_widget.layout().setSpacing(5) self.layout().addWidget(footer_widget) # copyright! info_lbl = QW.QLabel() info_lbl.setTextFormat(QC.Qt.RichText) info_lbl.setOpenExternalLinks(True) info_lbl.setText('Maximum Replacer",
"self.regexed_items if x[1]]) dialog_msg = 'Confirm rename of ' + str(number_different) + '",
"= QW.QLineEdit() self.txt_filter_name.setPlaceholderText('Pattern...') gb_selection.layout().addWidget(self.txt_filter_name) # self.txt_filter_type = QW.QLineEdit() self.txt_filter_type.setPlaceholderText('Type (e.g. transform)...') gb_selection.layout().addWidget(self.txt_filter_type) #",
"len(pattern): to_remove = [] for idx in range(len(result)): try: if None == re.search(pattern,",
"QW.QLabel() info_lbl.setTextFormat(QC.Qt.RichText) info_lbl.setOpenExternalLinks(True) info_lbl.setText('Maximum Replacer v1.2 <a href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>') footer_widget.layout().addWidget(info_lbl, 0, QC.Qt.AlignLeft) # update",
"= self.txt_replace_subs.text() result = [] for x in self.selected_items: subbed_name = x[0] try:",
"mc.undoInfo(openChunk=True, chunkName='MaximumReplacer') # rename all objects (in REVERSE order as to not break",
"QW.QLabel() lbl_subst.setText('Substitute') subs_widget.layout().addWidget(lbl_subst) # self.txt_replace_subs = QW.QLineEdit() self.txt_replace_subs.setPlaceholderText('Substitute...') subs_widget.layout().addWidget(self.txt_replace_subs) # Preview Section #",
"expr_widget.layout().setContentsMargins(2,2,2,2) expr_widget.layout().setSpacing(5) gb_expression.layout().addWidget(expr_widget) # lbl_regex = QW.QLabel() lbl_regex.setText('Pattern') expr_widget.layout().addWidget(lbl_regex) # self.txt_replace_expr = QW.QLineEdit()",
"done' #end def get_real_short_names(self, selected): result = [] for x in mc.ls(sl=selected, shortNames=True):",
"confirm dialog number_different = len([x for x in self.regexed_items if x[1]]) dialog_msg =",
"x[0]) subbed_name = mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name)) # make it maya-valid result.append((subbed_name, subbed_name != x[0])) #",
"mc.nodeType(result[idx][1]) try: if None == re.search(filter_type, node_type): to_remove.append(idx) except: continue #end for #",
"last | (the |+1 becomes 0 if the find fails, so it's okay",
"' objects?' dialog_result = mc.confirmDialog(title='Maximum Replacer', message=dialog_msg, button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No') if",
"self.setWindowTitle('Maximum Replacer') self.setMinimumWidth(380) self.setMinimumHeight(400) self.setLayout(QW.QVBoxLayout()) self.layout().setContentsMargins(5, 5, 5, 5) self.layout().setSpacing(5) self.layout().setAlignment(QC.Qt.AlignTop) # Selection",
"all objects (in REVERSE order as to not break the hierarchy) for x",
"changes in text fields def on_text_changed(self): if not self.chk_update_while_typing.isChecked(): return self.update() #end #",
"return # confirm dialog number_different = len([x for x in self.regexed_items if x[1]])",
"mc.undoInfo(closeChunk=True) # refresh view self.update() #end #end def create(): global g_maximum_replacer_inst try: g_maximum_replacer_inst.close()",
"= QW.QWidget() subs_widget.setLayout(QW.QHBoxLayout()) subs_widget.layout().setContentsMargins(2,2,2,2) subs_widget.layout().setSpacing(5) gb_expression.layout().addWidget(subs_widget) # lbl_subst = QW.QLabel() lbl_subst.setText('Substitute') subs_widget.layout().addWidget(lbl_subst) #",
"import maya.mel as mel import maya.OpenMayaUI as omui def get_maya_window(): ptr = omui.MQtUtil.mainWindow()",
"for x in reversed(range(len(self.selected_items))): # ignore nodes that don't need changing if not",
"dialog_msg = 'Confirm rename of ' + str(number_different) + ' objects?' dialog_result =",
"QW.QWidget() footer_widget.setLayout(QW.QHBoxLayout()) footer_widget.layout().setContentsMargins(0,0,0,0) footer_widget.layout().setSpacing(5) self.layout().addWidget(footer_widget) # copyright! info_lbl = QW.QLabel() info_lbl.setTextFormat(QC.Qt.RichText) info_lbl.setOpenExternalLinks(True) info_lbl.setText('Maximum",
"enumerate(result) if idx not in to_remove] #end return result #end def calculate_regexed_names(self): pattern",
"for x in range(len(self.selected_items)): short_old = self.selected_items[x][0] short_new = self.regexed_items[x][0] if self.regexed_items[x][1]: txt",
"if self.rb_select_sel.isChecked(): result = zip(self.get_real_short_names(True), mc.ls(sl=True, long=True)) # filter by type filter_type =",
"Replacer', message=dialog_msg, button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No') if 'No' == dialog_result: return #",
"QW.QGroupBox() gb_selection.setLayout(QW.QHBoxLayout()) gb_selection.layout().setContentsMargins(2,2,2,2) gb_selection.layout().setSpacing(5) gb_selection.setTitle('Selection') self.layout().addWidget(gb_selection) # self.rb_select_all = QW.QRadioButton() self.rb_select_all.setText('All') self.rb_select_all.setChecked(True) gb_selection.layout().addWidget(self.rb_select_all)",
"self.lv_preview.addItem(txt) if self.regexed_items[x][1]: self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font) #end def commit(self): # safety check if None ==",
"REVERSE order as to not break the hierarchy) for x in reversed(range(len(self.selected_items))): #",
"# old LONG name new_name = self.regexed_items[x][0] # new SHORT name try: mc.rename(old_name,",
"result = [] for x in mc.ls(sl=selected, shortNames=True): result.append(x[x.rfind('|')+1:]) # basically strip all",
"'No'], defaultButton='Yes', cancelButton='No', dismissString='No') if 'No' == dialog_result: return # undo chunk for",
"chunk for all names mc.undoInfo(openChunk=True, chunkName='MaximumReplacer') # rename all objects (in REVERSE order",
"= QW.QCheckBox() self.chk_update_while_typing.setText('Update while typing') self.chk_update_while_typing.setChecked(True) footer_widget.layout().addWidget(self.chk_update_while_typing, 0, QC.Qt.AlignRight) # connections self.txt_filter_name.textChanged.connect(self.on_text_changed) self.txt_filter_type.textChanged.connect(self.on_text_changed)",
"#end def create(): global g_maximum_replacer_inst try: g_maximum_replacer_inst.close() g_maximum_replacer_inst.deleteLater() except: pass g_maximum_replacer_inst = MaximumReplacer()",
"self.layout().setSpacing(5) self.layout().setAlignment(QC.Qt.AlignTop) # Selection Section # gb_selection = QW.QGroupBox() gb_selection.setLayout(QW.QHBoxLayout()) gb_selection.layout().setContentsMargins(2,2,2,2) gb_selection.layout().setSpacing(5) gb_selection.setTitle('Selection')",
"transform)...') gb_selection.layout().addWidget(self.txt_filter_type) # Expression Section # gb_expression = QW.QGroupBox() gb_expression.setLayout(QW.QVBoxLayout()) gb_expression.layout().setContentsMargins(2,2,2,2) gb_expression.layout().setSpacing(5) gb_expression.setTitle('Regular",
"update while typing checkbox self.chk_update_while_typing = QW.QCheckBox() self.chk_update_while_typing.setText('Update while typing') self.chk_update_while_typing.setChecked(True) footer_widget.layout().addWidget(self.chk_update_while_typing, 0,",
"selected): result = [] for x in mc.ls(sl=selected, shortNames=True): result.append(x[x.rfind('|')+1:]) # basically strip",
"the find fails, so it's okay to fail) return result #end def get_selection(self,",
"self.calculate_regexed_names() # 3. update list view with a preview of changes bold_font =",
"safety check if None == self.selected_items or None == self.regexed_items: return # confirm",
"Replacer') self.setMinimumWidth(380) self.setMinimumHeight(400) self.setLayout(QW.QVBoxLayout()) self.layout().setContentsMargins(5, 5, 5, 5) self.layout().setSpacing(5) self.layout().setAlignment(QC.Qt.AlignTop) # Selection Section",
"by expression pattern = self.txt_filter_name.text() if len(pattern): to_remove = [] for idx in",
"gb_selection.layout().addWidget(self.rb_select_sel) # self.txt_filter_name = QW.QLineEdit() self.txt_filter_name.setPlaceholderText('Pattern...') gb_selection.layout().addWidget(self.txt_filter_name) # self.txt_filter_type = QW.QLineEdit() self.txt_filter_type.setPlaceholderText('Type (e.g.",
"if len(filter_type): to_remove = [] for idx in range(len(result)): node_type = mc.nodeType(result[idx][1]) try:",
"objects?' dialog_result = mc.confirmDialog(title='Maximum Replacer', message=dialog_msg, button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No') if 'No'",
"[(short_name, long_name), ...] self.selected_items = [] # [(regexed_short_name, different_from_original), ...] maps 1-1 with",
"try: mc.rename(old_name, new_name) except Exception as e: print 'Failed to rename %s: %s'",
"result = zip(self.get_real_short_names(True), mc.ls(sl=True, long=True)) # filter by type filter_type = self.txt_filter_type.text() if",
"mel import maya.OpenMayaUI as omui def get_maya_window(): ptr = omui.MQtUtil.mainWindow() parent = shiboken2.wrapInstance(long(ptr),",
"rename %s: %s' % (old_name, e) # end chunk! mc.undoInfo(closeChunk=True) # refresh view",
"called when any text changes in text fields def on_text_changed(self): if not self.chk_update_while_typing.isChecked():",
"for x in self.selected_items: subbed_name = x[0] try: subbed_name = re.sub(pattern, subs, x[0])",
"self.update() #end #end def create(): global g_maximum_replacer_inst try: g_maximum_replacer_inst.close() g_maximum_replacer_inst.deleteLater() except: pass g_maximum_replacer_inst",
"in self.selected_items: subbed_name = x[0] try: subbed_name = re.sub(pattern, subs, x[0]) subbed_name =",
"if idx not in to_remove] #end # filter by expression pattern = self.txt_filter_name.text()",
"in text fields def on_text_changed(self): if not self.chk_update_while_typing.isChecked(): return self.update() #end # called",
"QW.QGroupBox() gb_preview.setLayout(QW.QVBoxLayout()) gb_preview.layout().setContentsMargins(2,2,2,2) gb_preview.layout().setSpacing(5) gb_preview.setTitle('Preview') self.layout().addWidget(gb_preview) # self.lv_preview = QW.QListWidget() gb_preview.layout().addWidget(self.lv_preview) # Button!",
"def on_text_edited(self): if self.chk_update_while_typing.isChecked(): return self.update() #end def edit_done(self): print 'Editing done' #end",
"if self.rb_select_all.isChecked(): result = zip(self.get_real_short_names(False), mc.ls(long=True)) # selected objects if self.rb_select_sel.isChecked(): result =",
"2. get the regex'd versions self.regexed_items = self.calculate_regexed_names() # 3. update list view",
"it's okay to fail) return result #end def get_selection(self, regex=None): result = []",
"x[0])) # (regex name, changed from original) except: result.append((subbed_name, False)) # failed so",
"= self.calculate_regexed_names() # 3. update list view with a preview of changes bold_font",
"gb_preview = QW.QGroupBox() gb_preview.setLayout(QW.QVBoxLayout()) gb_preview.layout().setContentsMargins(2,2,2,2) gb_preview.layout().setSpacing(5) gb_preview.setTitle('Preview') self.layout().addWidget(gb_preview) # self.lv_preview = QW.QListWidget() gb_preview.layout().addWidget(self.lv_preview)",
"MaximumReplacer() g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose) g_maximum_replacer_inst.show() #end # uncomment this to run directly from the script",
"typing') self.chk_update_while_typing.setChecked(True) footer_widget.layout().addWidget(self.chk_update_while_typing, 0, QC.Qt.AlignRight) # connections self.txt_filter_name.textChanged.connect(self.on_text_changed) self.txt_filter_type.textChanged.connect(self.on_text_changed) self.txt_replace_expr.textChanged.connect(self.on_text_changed) self.txt_replace_subs.textChanged.connect(self.on_text_changed) self.txt_filter_name.editingFinished.connect(self.on_text_edited) self.txt_filter_type.editingFinished.connect(self.on_text_edited)",
"# filter by expression pattern = self.txt_filter_name.text() if len(pattern): to_remove = [] for",
"# rename all objects (in REVERSE order as to not break the hierarchy)",
"filter_type = self.txt_filter_type.text() if len(filter_type): to_remove = [] for idx in range(len(result)): node_type",
"old LONG name new_name = self.regexed_items[x][0] # new SHORT name try: mc.rename(old_name, new_name)",
"objects (in REVERSE order as to not break the hierarchy) for x in",
"[] # [(regexed_short_name, different_from_original), ...] maps 1-1 with the above in size self.regexed_items",
"return pressed) def on_text_edited(self): if self.chk_update_while_typing.isChecked(): return self.update() #end def edit_done(self): print 'Editing",
"gb_selection.layout().setSpacing(5) gb_selection.setTitle('Selection') self.layout().addWidget(gb_selection) # self.rb_select_all = QW.QRadioButton() self.rb_select_all.setText('All') self.rb_select_all.setChecked(True) gb_selection.layout().addWidget(self.rb_select_all) # self.rb_select_sel =",
"# expr_widget = QW.QWidget() expr_widget.setLayout(QW.QHBoxLayout()) expr_widget.layout().setContentsMargins(2,2,2,2) expr_widget.layout().setSpacing(5) gb_expression.layout().addWidget(expr_widget) # lbl_regex = QW.QLabel() lbl_regex.setText('Pattern')",
"the regex'd versions self.regexed_items = self.calculate_regexed_names() # 3. update list view with a",
"in range(len(result)): node_type = mc.nodeType(result[idx][1]) try: if None == re.search(filter_type, node_type): to_remove.append(idx) except:",
"self.txt_filter_name.text() if len(pattern): to_remove = [] for idx in range(len(result)): try: if None",
"button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No') if 'No' == dialog_result: return # undo chunk",
"fail) return result #end def get_selection(self, regex=None): result = [] # all objects",
"Replacer # <NAME>, 2019 # GitHub: KasumiL5x import re import PySide2.QtCore as QC",
"PySide2.QtCore as QC import PySide2.QtGui as QG import PySide2.QtWidgets as QW import shiboken2",
"order as to not break the hierarchy) for x in reversed(range(len(self.selected_items))): # ignore",
"(the |+1 becomes 0 if the find fails, so it's okay to fail)",
"|+1 becomes 0 if the find fails, so it's okay to fail) return",
"if self.chk_update_while_typing.isChecked(): return self.update() #end def edit_done(self): print 'Editing done' #end def get_real_short_names(self,",
"# filter by type filter_type = self.txt_filter_type.text() if len(filter_type): to_remove = [] for",
"self.regexed_items[x][1]: self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font) #end def commit(self): # safety check if None == self.selected_items or",
"g_maximum_replacer_inst.close() g_maximum_replacer_inst.deleteLater() except: pass g_maximum_replacer_inst = MaximumReplacer() g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose) g_maximum_replacer_inst.show() #end # uncomment this",
"#end def update(self): # 1. get the selection self.selected_items = self.get_selection() # 2.",
"# self.txt_replace_subs = QW.QLineEdit() self.txt_replace_subs.setPlaceholderText('Substitute...') subs_widget.layout().addWidget(self.txt_replace_subs) # Preview Section # gb_preview = QW.QGroupBox()",
"view with a preview of changes bold_font = QG.QFont('', -1, QG.QFont.Bold, False) self.lv_preview.clear()",
"# remove all non-matching elements result = [x for idx, x in enumerate(result)",
"make it not changed return result #end def update(self): # 1. get the",
"above in size self.regexed_items = [] self.setWindowFlags(QC.Qt.Window) self.setWindowTitle('Maximum Replacer') self.setMinimumWidth(380) self.setMinimumHeight(400) self.setLayout(QW.QVBoxLayout()) self.layout().setContentsMargins(5,",
"Expression Section # gb_expression = QW.QGroupBox() gb_expression.setLayout(QW.QVBoxLayout()) gb_expression.layout().setContentsMargins(2,2,2,2) gb_expression.layout().setSpacing(5) gb_expression.setTitle('Regular Expression') self.layout().addWidget(gb_expression) #",
"Section # gb_expression = QW.QGroupBox() gb_expression.setLayout(QW.QVBoxLayout()) gb_expression.layout().setContentsMargins(2,2,2,2) gb_expression.layout().setSpacing(5) gb_expression.setTitle('Regular Expression') self.layout().addWidget(gb_expression) # expr_widget",
"= MaximumReplacer() g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose) g_maximum_replacer_inst.show() #end # uncomment this to run directly from the",
"changes have been committed in text fields (e.g. return pressed) def on_text_edited(self): if",
"QW.QGroupBox() gb_expression.setLayout(QW.QVBoxLayout()) gb_expression.layout().setContentsMargins(2,2,2,2) gb_expression.layout().setSpacing(5) gb_expression.setTitle('Regular Expression') self.layout().addWidget(gb_expression) # expr_widget = QW.QWidget() expr_widget.setLayout(QW.QHBoxLayout()) expr_widget.layout().setContentsMargins(2,2,2,2)",
"False) self.lv_preview.clear() for x in range(len(self.selected_items)): short_old = self.selected_items[x][0] short_new = self.regexed_items[x][0] if",
"def create(): global g_maximum_replacer_inst try: g_maximum_replacer_inst.close() g_maximum_replacer_inst.deleteLater() except: pass g_maximum_replacer_inst = MaximumReplacer() g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose)",
"self.regexed_items[x][1]: continue old_name = self.selected_items[x][1] # old LONG name new_name = self.regexed_items[x][0] #",
"lbl_regex = QW.QLabel() lbl_regex.setText('Pattern') expr_widget.layout().addWidget(lbl_regex) # self.txt_replace_expr = QW.QLineEdit() self.txt_replace_expr.setPlaceholderText('Regex...') expr_widget.layout().addWidget(self.txt_replace_expr) # subs_widget",
"# Button! self.btn_commit = QW.QPushButton() self.btn_commit.setText('Commit') self.layout().addWidget(self.btn_commit) # footer footer_widget = QW.QWidget() footer_widget.setLayout(QW.QHBoxLayout())",
"of ' + str(number_different) + ' objects?' dialog_result = mc.confirmDialog(title='Maximum Replacer', message=dialog_msg, button=['Yes',",
"and make it not changed return result #end def update(self): # 1. get",
"connections self.txt_filter_name.textChanged.connect(self.on_text_changed) self.txt_filter_type.textChanged.connect(self.on_text_changed) self.txt_replace_expr.textChanged.connect(self.on_text_changed) self.txt_replace_subs.textChanged.connect(self.on_text_changed) self.txt_filter_name.editingFinished.connect(self.on_text_edited) self.txt_filter_type.editingFinished.connect(self.on_text_edited) self.txt_replace_expr.editingFinished.connect(self.on_text_edited) self.txt_replace_subs.editingFinished.connect(self.on_text_edited) self.rb_select_all.clicked.connect(self.update) self.rb_select_sel.clicked.connect(self.update) self.btn_commit.clicked.connect(self.commit) #",
"need changing if not self.regexed_items[x][1]: continue old_name = self.selected_items[x][1] # old LONG name",
"self.layout().setContentsMargins(5, 5, 5, 5) self.layout().setSpacing(5) self.layout().setAlignment(QC.Qt.AlignTop) # Selection Section # gb_selection = QW.QGroupBox()",
"| (the |+1 becomes 0 if the find fails, so it's okay to",
"expr_widget.layout().addWidget(lbl_regex) # self.txt_replace_expr = QW.QLineEdit() self.txt_replace_expr.setPlaceholderText('Regex...') expr_widget.layout().addWidget(self.txt_replace_expr) # subs_widget = QW.QWidget() subs_widget.setLayout(QW.QHBoxLayout()) subs_widget.layout().setContentsMargins(2,2,2,2)",
"node_type): to_remove.append(idx) except: continue #end for # remove all non-matching elements result =",
"#end def get_real_short_names(self, selected): result = [] for x in mc.ls(sl=selected, shortNames=True): result.append(x[x.rfind('|')+1:])",
"the above in size self.regexed_items = [] self.setWindowFlags(QC.Qt.Window) self.setWindowTitle('Maximum Replacer') self.setMinimumWidth(380) self.setMinimumHeight(400) self.setLayout(QW.QVBoxLayout())",
"self.txt_filter_type.textChanged.connect(self.on_text_changed) self.txt_replace_expr.textChanged.connect(self.on_text_changed) self.txt_replace_subs.textChanged.connect(self.on_text_changed) self.txt_filter_name.editingFinished.connect(self.on_text_edited) self.txt_filter_type.editingFinished.connect(self.on_text_edited) self.txt_replace_expr.editingFinished.connect(self.on_text_edited) self.txt_replace_subs.editingFinished.connect(self.on_text_edited) self.rb_select_all.clicked.connect(self.update) self.rb_select_sel.clicked.connect(self.update) self.btn_commit.clicked.connect(self.commit) # initial self.update()",
"5) self.layout().setSpacing(5) self.layout().setAlignment(QC.Qt.AlignTop) # Selection Section # gb_selection = QW.QGroupBox() gb_selection.setLayout(QW.QHBoxLayout()) gb_selection.layout().setContentsMargins(2,2,2,2) gb_selection.layout().setSpacing(5)",
"try: g_maximum_replacer_inst.close() g_maximum_replacer_inst.deleteLater() except: pass g_maximum_replacer_inst = MaximumReplacer() g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose) g_maximum_replacer_inst.show() #end # uncomment",
"not in to_remove] #end # filter by expression pattern = self.txt_filter_name.text() if len(pattern):",
"x in self.selected_items: subbed_name = x[0] try: subbed_name = re.sub(pattern, subs, x[0]) subbed_name",
"self.regexed_items[x][1]: txt = short_old + ' => ' + short_new else: txt =",
"== self.regexed_items: return # confirm dialog number_different = len([x for x in self.regexed_items",
"pass through data and make it not changed return result #end def update(self):",
"maya.OpenMayaUI as omui def get_maya_window(): ptr = omui.MQtUtil.mainWindow() parent = shiboken2.wrapInstance(long(ptr), QW.QDialog) return",
"if the find fails, so it's okay to fail) return result #end def",
"= mc.nodeType(result[idx][1]) try: if None == re.search(filter_type, node_type): to_remove.append(idx) except: continue #end for",
"mc.confirmDialog(title='Maximum Replacer', message=dialog_msg, button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No') if 'No' == dialog_result: return",
"[] for idx in range(len(result)): node_type = mc.nodeType(result[idx][1]) try: if None == re.search(filter_type,",
"parent = shiboken2.wrapInstance(long(ptr), QW.QDialog) return parent #end class MaximumReplacer(QW.QDialog): def __init__(self, parent=get_maya_window()): QW.QDialog.__init__(self,",
"[] for x in mc.ls(sl=selected, shortNames=True): result.append(x[x.rfind('|')+1:]) # basically strip all after last",
"None == self.selected_items or None == self.regexed_items: return # confirm dialog number_different =",
"5, 5, 5) self.layout().setSpacing(5) self.layout().setAlignment(QC.Qt.AlignTop) # Selection Section # gb_selection = QW.QGroupBox() gb_selection.setLayout(QW.QHBoxLayout())",
"for idx, x in enumerate(result) if idx not in to_remove] #end # filter",
"view self.update() #end #end def create(): global g_maximum_replacer_inst try: g_maximum_replacer_inst.close() g_maximum_replacer_inst.deleteLater() except: pass",
"MaximumReplacer(QW.QDialog): def __init__(self, parent=get_maya_window()): QW.QDialog.__init__(self, parent=parent) # [(short_name, long_name), ...] self.selected_items = []",
"subbed_name = mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name)) # make it maya-valid result.append((subbed_name, subbed_name != x[0])) # (regex",
"# undo chunk for all names mc.undoInfo(openChunk=True, chunkName='MaximumReplacer') # rename all objects (in",
"'Failed to rename %s: %s' % (old_name, e) # end chunk! mc.undoInfo(closeChunk=True) #",
"as QC import PySide2.QtGui as QG import PySide2.QtWidgets as QW import shiboken2 import",
"to_remove] #end return result #end def calculate_regexed_names(self): pattern = self.txt_replace_expr.text() subs = self.txt_replace_subs.text()",
"self.update() #end # called when changes have been committed in text fields (e.g.",
"class MaximumReplacer(QW.QDialog): def __init__(self, parent=get_maya_window()): QW.QDialog.__init__(self, parent=parent) # [(short_name, long_name), ...] self.selected_items =",
"info_lbl.setText('Maximum Replacer v1.2 <a href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>') footer_widget.layout().addWidget(info_lbl, 0, QC.Qt.AlignLeft) # update while typing checkbox",
"#end def commit(self): # safety check if None == self.selected_items or None ==",
"shiboken2.wrapInstance(long(ptr), QW.QDialog) return parent #end class MaximumReplacer(QW.QDialog): def __init__(self, parent=get_maya_window()): QW.QDialog.__init__(self, parent=parent) #",
"self.btn_commit.setText('Commit') self.layout().addWidget(self.btn_commit) # footer footer_widget = QW.QWidget() footer_widget.setLayout(QW.QHBoxLayout()) footer_widget.layout().setContentsMargins(0,0,0,0) footer_widget.layout().setSpacing(5) self.layout().addWidget(footer_widget) # copyright!",
"self.txt_filter_name.setPlaceholderText('Pattern...') gb_selection.layout().addWidget(self.txt_filter_name) # self.txt_filter_type = QW.QLineEdit() self.txt_filter_type.setPlaceholderText('Type (e.g. transform)...') gb_selection.layout().addWidget(self.txt_filter_type) # Expression Section",
"Maximum Replacer # <NAME>, 2019 # GitHub: KasumiL5x import re import PySide2.QtCore as",
"== re.search(pattern, result[idx][0]): to_remove.append(idx) except: continue #end for # remove all non-matching elements",
"self.selected_items: subbed_name = x[0] try: subbed_name = re.sub(pattern, subs, x[0]) subbed_name = mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name))",
"get_selection(self, regex=None): result = [] # all objects if self.rb_select_all.isChecked(): result = zip(self.get_real_short_names(False),",
"self.layout().addWidget(gb_preview) # self.lv_preview = QW.QListWidget() gb_preview.layout().addWidget(self.lv_preview) # Button! self.btn_commit = QW.QPushButton() self.btn_commit.setText('Commit') self.layout().addWidget(self.btn_commit)",
"# gb_selection = QW.QGroupBox() gb_selection.setLayout(QW.QHBoxLayout()) gb_selection.layout().setContentsMargins(2,2,2,2) gb_selection.layout().setSpacing(5) gb_selection.setTitle('Selection') self.layout().addWidget(gb_selection) # self.rb_select_all = QW.QRadioButton()",
"self.layout().addWidget(self.btn_commit) # footer footer_widget = QW.QWidget() footer_widget.setLayout(QW.QHBoxLayout()) footer_widget.layout().setContentsMargins(0,0,0,0) footer_widget.layout().setSpacing(5) self.layout().addWidget(footer_widget) # copyright! info_lbl",
"except: result.append((subbed_name, False)) # failed so just pass through data and make it",
"# ignore nodes that don't need changing if not self.regexed_items[x][1]: continue old_name =",
"shiboken2 import maya.cmds as mc import maya.mel as mel import maya.OpenMayaUI as omui",
"QW import shiboken2 import maya.cmds as mc import maya.mel as mel import maya.OpenMayaUI",
"# called when changes have been committed in text fields (e.g. return pressed)",
"it not changed return result #end def update(self): # 1. get the selection",
"def commit(self): # safety check if None == self.selected_items or None == self.regexed_items:",
"by type filter_type = self.txt_filter_type.text() if len(filter_type): to_remove = [] for idx in",
"[] for idx in range(len(result)): try: if None == re.search(pattern, result[idx][0]): to_remove.append(idx) except:",
"result = [x for idx, x in enumerate(result) if idx not in to_remove]",
"from original) except: result.append((subbed_name, False)) # failed so just pass through data and",
"size self.regexed_items = [] self.setWindowFlags(QC.Qt.Window) self.setWindowTitle('Maximum Replacer') self.setMinimumWidth(380) self.setMinimumHeight(400) self.setLayout(QW.QVBoxLayout()) self.layout().setContentsMargins(5, 5, 5,",
"str(number_different) + ' objects?' dialog_result = mc.confirmDialog(title='Maximum Replacer', message=dialog_msg, button=['Yes', 'No'], defaultButton='Yes', cancelButton='No',",
"self.update() #end def edit_done(self): print 'Editing done' #end def get_real_short_names(self, selected): result =",
"lbl_subst.setText('Substitute') subs_widget.layout().addWidget(lbl_subst) # self.txt_replace_subs = QW.QLineEdit() self.txt_replace_subs.setPlaceholderText('Substitute...') subs_widget.layout().addWidget(self.txt_replace_subs) # Preview Section # gb_preview",
"dialog number_different = len([x for x in self.regexed_items if x[1]]) dialog_msg = 'Confirm",
"'No' == dialog_result: return # undo chunk for all names mc.undoInfo(openChunk=True, chunkName='MaximumReplacer') #",
"in range(len(self.selected_items)): short_old = self.selected_items[x][0] short_new = self.regexed_items[x][0] if self.regexed_items[x][1]: txt = short_old",
"GitHub: KasumiL5x import re import PySide2.QtCore as QC import PySide2.QtGui as QG import",
"subs_widget.layout().setSpacing(5) gb_expression.layout().addWidget(subs_widget) # lbl_subst = QW.QLabel() lbl_subst.setText('Substitute') subs_widget.layout().addWidget(lbl_subst) # self.txt_replace_subs = QW.QLineEdit() self.txt_replace_subs.setPlaceholderText('Substitute...')",
"not in to_remove] #end return result #end def calculate_regexed_names(self): pattern = self.txt_replace_expr.text() subs",
"self.txt_filter_type.setPlaceholderText('Type (e.g. transform)...') gb_selection.layout().addWidget(self.txt_filter_type) # Expression Section # gb_expression = QW.QGroupBox() gb_expression.setLayout(QW.QVBoxLayout()) gb_expression.layout().setContentsMargins(2,2,2,2)",
"%s' % (old_name, e) # end chunk! mc.undoInfo(closeChunk=True) # refresh view self.update() #end",
"Replacer v1.2 <a href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>') footer_widget.layout().addWidget(info_lbl, 0, QC.Qt.AlignLeft) # update while typing checkbox self.chk_update_while_typing",
"on_text_edited(self): if self.chk_update_while_typing.isChecked(): return self.update() #end def edit_done(self): print 'Editing done' #end def",
"self.rb_select_all.isChecked(): result = zip(self.get_real_short_names(False), mc.ls(long=True)) # selected objects if self.rb_select_sel.isChecked(): result = zip(self.get_real_short_names(True),",
"as omui def get_maya_window(): ptr = omui.MQtUtil.mainWindow() parent = shiboken2.wrapInstance(long(ptr), QW.QDialog) return parent",
"subs_widget.setLayout(QW.QHBoxLayout()) subs_widget.layout().setContentsMargins(2,2,2,2) subs_widget.layout().setSpacing(5) gb_expression.layout().addWidget(subs_widget) # lbl_subst = QW.QLabel() lbl_subst.setText('Substitute') subs_widget.layout().addWidget(lbl_subst) # self.txt_replace_subs =",
"self.regexed_items: return # confirm dialog number_different = len([x for x in self.regexed_items if",
"for all names mc.undoInfo(openChunk=True, chunkName='MaximumReplacer') # rename all objects (in REVERSE order as",
"self.txt_replace_subs.text() result = [] for x in self.selected_items: subbed_name = x[0] try: subbed_name",
"maya-valid result.append((subbed_name, subbed_name != x[0])) # (regex name, changed from original) except: result.append((subbed_name,",
"if not self.regexed_items[x][1]: continue old_name = self.selected_items[x][1] # old LONG name new_name =",
"called when changes have been committed in text fields (e.g. return pressed) def",
"committed in text fields (e.g. return pressed) def on_text_edited(self): if self.chk_update_while_typing.isChecked(): return self.update()",
"subs = self.txt_replace_subs.text() result = [] for x in self.selected_items: subbed_name = x[0]",
"self.regexed_items[x][0] # new SHORT name try: mc.rename(old_name, new_name) except Exception as e: print",
"0, QC.Qt.AlignLeft) # update while typing checkbox self.chk_update_while_typing = QW.QCheckBox() self.chk_update_while_typing.setText('Update while typing')",
"make it maya-valid result.append((subbed_name, subbed_name != x[0])) # (regex name, changed from original)",
"objects if self.rb_select_all.isChecked(): result = zip(self.get_real_short_names(False), mc.ls(long=True)) # selected objects if self.rb_select_sel.isChecked(): result",
"# selected objects if self.rb_select_sel.isChecked(): result = zip(self.get_real_short_names(True), mc.ls(sl=True, long=True)) # filter by",
"self.chk_update_while_typing = QW.QCheckBox() self.chk_update_while_typing.setText('Update while typing') self.chk_update_while_typing.setChecked(True) footer_widget.layout().addWidget(self.chk_update_while_typing, 0, QC.Qt.AlignRight) # connections self.txt_filter_name.textChanged.connect(self.on_text_changed)",
"for idx in range(len(result)): try: if None == re.search(pattern, result[idx][0]): to_remove.append(idx) except: continue",
"g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose) g_maximum_replacer_inst.show() #end # uncomment this to run directly from the script editor",
"# 2. get the regex'd versions self.regexed_items = self.calculate_regexed_names() # 3. update list",
"result.append((subbed_name, subbed_name != x[0])) # (regex name, changed from original) except: result.append((subbed_name, False))",
"name try: mc.rename(old_name, new_name) except Exception as e: print 'Failed to rename %s:",
"subs_widget = QW.QWidget() subs_widget.setLayout(QW.QHBoxLayout()) subs_widget.layout().setContentsMargins(2,2,2,2) subs_widget.layout().setSpacing(5) gb_expression.layout().addWidget(subs_widget) # lbl_subst = QW.QLabel() lbl_subst.setText('Substitute') subs_widget.layout().addWidget(lbl_subst)",
"= [] for idx in range(len(result)): try: if None == re.search(pattern, result[idx][0]): to_remove.append(idx)",
"selected objects if self.rb_select_sel.isChecked(): result = zip(self.get_real_short_names(True), mc.ls(sl=True, long=True)) # filter by type",
"self.rb_select_sel.isChecked(): result = zip(self.get_real_short_names(True), mc.ls(sl=True, long=True)) # filter by type filter_type = self.txt_filter_type.text()",
"self.lv_preview.clear() for x in range(len(self.selected_items)): short_old = self.selected_items[x][0] short_new = self.regexed_items[x][0] if self.regexed_items[x][1]:",
"gb_expression.layout().setContentsMargins(2,2,2,2) gb_expression.layout().setSpacing(5) gb_expression.setTitle('Regular Expression') self.layout().addWidget(gb_expression) # expr_widget = QW.QWidget() expr_widget.setLayout(QW.QHBoxLayout()) expr_widget.layout().setContentsMargins(2,2,2,2) expr_widget.layout().setSpacing(5) gb_expression.layout().addWidget(expr_widget)",
"dialog_result: return # undo chunk for all names mc.undoInfo(openChunk=True, chunkName='MaximumReplacer') # rename all",
"#end def edit_done(self): print 'Editing done' #end def get_real_short_names(self, selected): result = []",
"initial self.update() #end # called when any text changes in text fields def",
"= [] # all objects if self.rb_select_all.isChecked(): result = zip(self.get_real_short_names(False), mc.ls(long=True)) # selected",
"= [] self.setWindowFlags(QC.Qt.Window) self.setWindowTitle('Maximum Replacer') self.setMinimumWidth(380) self.setMinimumHeight(400) self.setLayout(QW.QVBoxLayout()) self.layout().setContentsMargins(5, 5, 5, 5) self.layout().setSpacing(5)",
"gb_selection.layout().setContentsMargins(2,2,2,2) gb_selection.layout().setSpacing(5) gb_selection.setTitle('Selection') self.layout().addWidget(gb_selection) # self.rb_select_all = QW.QRadioButton() self.rb_select_all.setText('All') self.rb_select_all.setChecked(True) gb_selection.layout().addWidget(self.rb_select_all) # self.rb_select_sel",
"in size self.regexed_items = [] self.setWindowFlags(QC.Qt.Window) self.setWindowTitle('Maximum Replacer') self.setMinimumWidth(380) self.setMinimumHeight(400) self.setLayout(QW.QVBoxLayout()) self.layout().setContentsMargins(5, 5,",
"check if None == self.selected_items or None == self.regexed_items: return # confirm dialog",
"not self.chk_update_while_typing.isChecked(): return self.update() #end # called when changes have been committed in",
"QG.QFont('', -1, QG.QFont.Bold, False) self.lv_preview.clear() for x in range(len(self.selected_items)): short_old = self.selected_items[x][0] short_new",
"# [(regexed_short_name, different_from_original), ...] maps 1-1 with the above in size self.regexed_items =",
"footer_widget = QW.QWidget() footer_widget.setLayout(QW.QHBoxLayout()) footer_widget.layout().setContentsMargins(0,0,0,0) footer_widget.layout().setSpacing(5) self.layout().addWidget(footer_widget) # copyright! info_lbl = QW.QLabel() info_lbl.setTextFormat(QC.Qt.RichText)",
"= [] for x in self.selected_items: subbed_name = x[0] try: subbed_name = re.sub(pattern,",
"=> ' + short_new else: txt = short_old self.lv_preview.addItem(txt) if self.regexed_items[x][1]: self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font) #end",
"== re.search(filter_type, node_type): to_remove.append(idx) except: continue #end for # remove all non-matching elements",
"gb_expression.setTitle('Regular Expression') self.layout().addWidget(gb_expression) # expr_widget = QW.QWidget() expr_widget.setLayout(QW.QHBoxLayout()) expr_widget.layout().setContentsMargins(2,2,2,2) expr_widget.layout().setSpacing(5) gb_expression.layout().addWidget(expr_widget) # lbl_regex",
"self.selected_items or None == self.regexed_items: return # confirm dialog number_different = len([x for",
"= len([x for x in self.regexed_items if x[1]]) dialog_msg = 'Confirm rename of",
"#end # uncomment this to run directly from the script editor (or call",
"= QW.QWidget() expr_widget.setLayout(QW.QHBoxLayout()) expr_widget.layout().setContentsMargins(2,2,2,2) expr_widget.layout().setSpacing(5) gb_expression.layout().addWidget(expr_widget) # lbl_regex = QW.QLabel() lbl_regex.setText('Pattern') expr_widget.layout().addWidget(lbl_regex) #",
"Section # gb_preview = QW.QGroupBox() gb_preview.setLayout(QW.QVBoxLayout()) gb_preview.layout().setContentsMargins(2,2,2,2) gb_preview.layout().setSpacing(5) gb_preview.setTitle('Preview') self.layout().addWidget(gb_preview) # self.lv_preview =",
"...] self.selected_items = [] # [(regexed_short_name, different_from_original), ...] maps 1-1 with the above",
"import shiboken2 import maya.cmds as mc import maya.mel as mel import maya.OpenMayaUI as",
"self.txt_filter_type = QW.QLineEdit() self.txt_filter_type.setPlaceholderText('Type (e.g. transform)...') gb_selection.layout().addWidget(self.txt_filter_type) # Expression Section # gb_expression =",
"# Maximum Replacer # <NAME>, 2019 # GitHub: KasumiL5x import re import PySide2.QtCore",
"except: continue #end for # remove all non-matching elements result = [x for",
"self.rb_select_all.clicked.connect(self.update) self.rb_select_sel.clicked.connect(self.update) self.btn_commit.clicked.connect(self.commit) # initial self.update() #end # called when any text changes",
"ignore nodes that don't need changing if not self.regexed_items[x][1]: continue old_name = self.selected_items[x][1]",
"name new_name = self.regexed_items[x][0] # new SHORT name try: mc.rename(old_name, new_name) except Exception",
"= self.regexed_items[x][0] # new SHORT name try: mc.rename(old_name, new_name) except Exception as e:",
"QW.QLabel() lbl_regex.setText('Pattern') expr_widget.layout().addWidget(lbl_regex) # self.txt_replace_expr = QW.QLineEdit() self.txt_replace_expr.setPlaceholderText('Regex...') expr_widget.layout().addWidget(self.txt_replace_expr) # subs_widget = QW.QWidget()",
"gb_expression.layout().addWidget(subs_widget) # lbl_subst = QW.QLabel() lbl_subst.setText('Substitute') subs_widget.layout().addWidget(lbl_subst) # self.txt_replace_subs = QW.QLineEdit() self.txt_replace_subs.setPlaceholderText('Substitute...') subs_widget.layout().addWidget(self.txt_replace_subs)",
"undo chunk for all names mc.undoInfo(openChunk=True, chunkName='MaximumReplacer') # rename all objects (in REVERSE",
"# # Maximum Replacer # <NAME>, 2019 # GitHub: KasumiL5x import re import",
"nodes that don't need changing if not self.regexed_items[x][1]: continue old_name = self.selected_items[x][1] #",
"re.search(filter_type, node_type): to_remove.append(idx) except: continue #end for # remove all non-matching elements result",
"(regex name, changed from original) except: result.append((subbed_name, False)) # failed so just pass",
"refresh view self.update() #end #end def create(): global g_maximum_replacer_inst try: g_maximum_replacer_inst.close() g_maximum_replacer_inst.deleteLater() except:",
"elements result = [x for idx, x in enumerate(result) if idx not in",
"self.rb_select_all.setText('All') self.rb_select_all.setChecked(True) gb_selection.layout().addWidget(self.rb_select_all) # self.rb_select_sel = QW.QRadioButton() self.rb_select_sel.setText('Selected') gb_selection.layout().addWidget(self.rb_select_sel) # self.txt_filter_name = QW.QLineEdit()",
"data and make it not changed return result #end def update(self): # 1.",
"info_lbl = QW.QLabel() info_lbl.setTextFormat(QC.Qt.RichText) info_lbl.setOpenExternalLinks(True) info_lbl.setText('Maximum Replacer v1.2 <a href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>') footer_widget.layout().addWidget(info_lbl, 0, QC.Qt.AlignLeft)",
"so just pass through data and make it not changed return result #end",
"get the selection self.selected_items = self.get_selection() # 2. get the regex'd versions self.regexed_items",
"continue old_name = self.selected_items[x][1] # old LONG name new_name = self.regexed_items[x][0] # new",
"+ short_new else: txt = short_old self.lv_preview.addItem(txt) if self.regexed_items[x][1]: self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font) #end def commit(self):",
"# copyright! info_lbl = QW.QLabel() info_lbl.setTextFormat(QC.Qt.RichText) info_lbl.setOpenExternalLinks(True) info_lbl.setText('Maximum Replacer v1.2 <a href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>') footer_widget.layout().addWidget(info_lbl,",
"= QW.QRadioButton() self.rb_select_all.setText('All') self.rb_select_all.setChecked(True) gb_selection.layout().addWidget(self.rb_select_all) # self.rb_select_sel = QW.QRadioButton() self.rb_select_sel.setText('Selected') gb_selection.layout().addWidget(self.rb_select_sel) # self.txt_filter_name",
"end chunk! mc.undoInfo(closeChunk=True) # refresh view self.update() #end #end def create(): global g_maximum_replacer_inst",
"= [] for idx in range(len(result)): node_type = mc.nodeType(result[idx][1]) try: if None ==",
"QW.QCheckBox() self.chk_update_while_typing.setText('Update while typing') self.chk_update_while_typing.setChecked(True) footer_widget.layout().addWidget(self.chk_update_while_typing, 0, QC.Qt.AlignRight) # connections self.txt_filter_name.textChanged.connect(self.on_text_changed) self.txt_filter_type.textChanged.connect(self.on_text_changed) self.txt_replace_expr.textChanged.connect(self.on_text_changed)",
"QW.QLineEdit() self.txt_filter_type.setPlaceholderText('Type (e.g. transform)...') gb_selection.layout().addWidget(self.txt_filter_type) # Expression Section # gb_expression = QW.QGroupBox() gb_expression.setLayout(QW.QVBoxLayout())",
"reversed(range(len(self.selected_items))): # ignore nodes that don't need changing if not self.regexed_items[x][1]: continue old_name",
"idx not in to_remove] #end # filter by expression pattern = self.txt_filter_name.text() if",
"import maya.cmds as mc import maya.mel as mel import maya.OpenMayaUI as omui def",
"= QW.QWidget() footer_widget.setLayout(QW.QHBoxLayout()) footer_widget.layout().setContentsMargins(0,0,0,0) footer_widget.layout().setSpacing(5) self.layout().addWidget(footer_widget) # copyright! info_lbl = QW.QLabel() info_lbl.setTextFormat(QC.Qt.RichText) info_lbl.setOpenExternalLinks(True)",
"-1, QG.QFont.Bold, False) self.lv_preview.clear() for x in range(len(self.selected_items)): short_old = self.selected_items[x][0] short_new =",
"x[1]]) dialog_msg = 'Confirm rename of ' + str(number_different) + ' objects?' dialog_result",
"# self.rb_select_all = QW.QRadioButton() self.rb_select_all.setText('All') self.rb_select_all.setChecked(True) gb_selection.layout().addWidget(self.rb_select_all) # self.rb_select_sel = QW.QRadioButton() self.rb_select_sel.setText('Selected') gb_selection.layout().addWidget(self.rb_select_sel)",
"self.rb_select_sel = QW.QRadioButton() self.rb_select_sel.setText('Selected') gb_selection.layout().addWidget(self.rb_select_sel) # self.txt_filter_name = QW.QLineEdit() self.txt_filter_name.setPlaceholderText('Pattern...') gb_selection.layout().addWidget(self.txt_filter_name) # self.txt_filter_type",
"maya.cmds as mc import maya.mel as mel import maya.OpenMayaUI as omui def get_maya_window():",
"self.rb_select_all = QW.QRadioButton() self.rb_select_all.setText('All') self.rb_select_all.setChecked(True) gb_selection.layout().addWidget(self.rb_select_all) # self.rb_select_sel = QW.QRadioButton() self.rb_select_sel.setText('Selected') gb_selection.layout().addWidget(self.rb_select_sel) #",
"short_new = self.regexed_items[x][0] if self.regexed_items[x][1]: txt = short_old + ' => ' +",
"with the above in size self.regexed_items = [] self.setWindowFlags(QC.Qt.Window) self.setWindowTitle('Maximum Replacer') self.setMinimumWidth(380) self.setMinimumHeight(400)",
"gb_selection.layout().addWidget(self.rb_select_all) # self.rb_select_sel = QW.QRadioButton() self.rb_select_sel.setText('Selected') gb_selection.layout().addWidget(self.rb_select_sel) # self.txt_filter_name = QW.QLineEdit() self.txt_filter_name.setPlaceholderText('Pattern...') gb_selection.layout().addWidget(self.txt_filter_name)",
"get_real_short_names(self, selected): result = [] for x in mc.ls(sl=selected, shortNames=True): result.append(x[x.rfind('|')+1:]) # basically",
"txt = short_old + ' => ' + short_new else: txt = short_old",
"!= x[0])) # (regex name, changed from original) except: result.append((subbed_name, False)) # failed",
"Button! self.btn_commit = QW.QPushButton() self.btn_commit.setText('Commit') self.layout().addWidget(self.btn_commit) # footer footer_widget = QW.QWidget() footer_widget.setLayout(QW.QHBoxLayout()) footer_widget.layout().setContentsMargins(0,0,0,0)",
"# footer footer_widget = QW.QWidget() footer_widget.setLayout(QW.QHBoxLayout()) footer_widget.layout().setContentsMargins(0,0,0,0) footer_widget.layout().setSpacing(5) self.layout().addWidget(footer_widget) # copyright! info_lbl =",
"x in reversed(range(len(self.selected_items))): # ignore nodes that don't need changing if not self.regexed_items[x][1]:",
"self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font) #end def commit(self): # safety check if None == self.selected_items or None",
"enumerate(result) if idx not in to_remove] #end # filter by expression pattern =",
"# end chunk! mc.undoInfo(closeChunk=True) # refresh view self.update() #end #end def create(): global",
"bold_font = QG.QFont('', -1, QG.QFont.Bold, False) self.lv_preview.clear() for x in range(len(self.selected_items)): short_old =",
"= QW.QLineEdit() self.txt_replace_subs.setPlaceholderText('Substitute...') subs_widget.layout().addWidget(self.txt_replace_subs) # Preview Section # gb_preview = QW.QGroupBox() gb_preview.setLayout(QW.QVBoxLayout()) gb_preview.layout().setContentsMargins(2,2,2,2)",
"import re import PySide2.QtCore as QC import PySide2.QtGui as QG import PySide2.QtWidgets as",
"# Preview Section # gb_preview = QW.QGroupBox() gb_preview.setLayout(QW.QVBoxLayout()) gb_preview.layout().setContentsMargins(2,2,2,2) gb_preview.layout().setSpacing(5) gb_preview.setTitle('Preview') self.layout().addWidget(gb_preview) #",
"# make it maya-valid result.append((subbed_name, subbed_name != x[0])) # (regex name, changed from",
"on_text_changed(self): if not self.chk_update_while_typing.isChecked(): return self.update() #end # called when changes have been",
"mc.ls(sl=selected, shortNames=True): result.append(x[x.rfind('|')+1:]) # basically strip all after last | (the |+1 becomes",
"0, QC.Qt.AlignRight) # connections self.txt_filter_name.textChanged.connect(self.on_text_changed) self.txt_filter_type.textChanged.connect(self.on_text_changed) self.txt_replace_expr.textChanged.connect(self.on_text_changed) self.txt_replace_subs.textChanged.connect(self.on_text_changed) self.txt_filter_name.editingFinished.connect(self.on_text_edited) self.txt_filter_type.editingFinished.connect(self.on_text_edited) self.txt_replace_expr.editingFinished.connect(self.on_text_edited) self.txt_replace_subs.editingFinished.connect(self.on_text_edited) self.rb_select_all.clicked.connect(self.update)",
"chunk! mc.undoInfo(closeChunk=True) # refresh view self.update() #end #end def create(): global g_maximum_replacer_inst try:",
"short_old self.lv_preview.addItem(txt) if self.regexed_items[x][1]: self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font) #end def commit(self): # safety check if None",
"rename all objects (in REVERSE order as to not break the hierarchy) for",
"idx in range(len(result)): node_type = mc.nodeType(result[idx][1]) try: if None == re.search(filter_type, node_type): to_remove.append(idx)",
"self.layout().addWidget(gb_selection) # self.rb_select_all = QW.QRadioButton() self.rb_select_all.setText('All') self.rb_select_all.setChecked(True) gb_selection.layout().addWidget(self.rb_select_all) # self.rb_select_sel = QW.QRadioButton() self.rb_select_sel.setText('Selected')",
"' => ' + short_new else: txt = short_old self.lv_preview.addItem(txt) if self.regexed_items[x][1]: self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font)",
"[x for idx, x in enumerate(result) if idx not in to_remove] #end #",
"Exception as e: print 'Failed to rename %s: %s' % (old_name, e) #",
"in text fields (e.g. return pressed) def on_text_edited(self): if self.chk_update_while_typing.isChecked(): return self.update() #end",
"maya.mel as mel import maya.OpenMayaUI as omui def get_maya_window(): ptr = omui.MQtUtil.mainWindow() parent",
"'Editing done' #end def get_real_short_names(self, selected): result = [] for x in mc.ls(sl=selected,",
"self.regexed_items = self.calculate_regexed_names() # 3. update list view with a preview of changes",
"get_maya_window(): ptr = omui.MQtUtil.mainWindow() parent = shiboken2.wrapInstance(long(ptr), QW.QDialog) return parent #end class MaximumReplacer(QW.QDialog):",
"name, changed from original) except: result.append((subbed_name, False)) # failed so just pass through",
"info_lbl.setTextFormat(QC.Qt.RichText) info_lbl.setOpenExternalLinks(True) info_lbl.setText('Maximum Replacer v1.2 <a href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>') footer_widget.layout().addWidget(info_lbl, 0, QC.Qt.AlignLeft) # update while",
"with a preview of changes bold_font = QG.QFont('', -1, QG.QFont.Bold, False) self.lv_preview.clear() for",
"all after last | (the |+1 becomes 0 if the find fails, so",
"directly from the script editor (or call it from a shelf) # create()",
"# basically strip all after last | (the |+1 becomes 0 if the",
"# uncomment this to run directly from the script editor (or call it",
"QW.QWidget() expr_widget.setLayout(QW.QHBoxLayout()) expr_widget.layout().setContentsMargins(2,2,2,2) expr_widget.layout().setSpacing(5) gb_expression.layout().addWidget(expr_widget) # lbl_regex = QW.QLabel() lbl_regex.setText('Pattern') expr_widget.layout().addWidget(lbl_regex) # self.txt_replace_expr",
"# connections self.txt_filter_name.textChanged.connect(self.on_text_changed) self.txt_filter_type.textChanged.connect(self.on_text_changed) self.txt_replace_expr.textChanged.connect(self.on_text_changed) self.txt_replace_subs.textChanged.connect(self.on_text_changed) self.txt_filter_name.editingFinished.connect(self.on_text_edited) self.txt_filter_type.editingFinished.connect(self.on_text_edited) self.txt_replace_expr.editingFinished.connect(self.on_text_edited) self.txt_replace_subs.editingFinished.connect(self.on_text_edited) self.rb_select_all.clicked.connect(self.update) self.rb_select_sel.clicked.connect(self.update) self.btn_commit.clicked.connect(self.commit)",
"[x for idx, x in enumerate(result) if idx not in to_remove] #end return",
"self.btn_commit.clicked.connect(self.commit) # initial self.update() #end # called when any text changes in text",
"return parent #end class MaximumReplacer(QW.QDialog): def __init__(self, parent=get_maya_window()): QW.QDialog.__init__(self, parent=parent) # [(short_name, long_name),",
"x in range(len(self.selected_items)): short_old = self.selected_items[x][0] short_new = self.regexed_items[x][0] if self.regexed_items[x][1]: txt =",
"long_name), ...] self.selected_items = [] # [(regexed_short_name, different_from_original), ...] maps 1-1 with the",
"gb_expression.layout().addWidget(expr_widget) # lbl_regex = QW.QLabel() lbl_regex.setText('Pattern') expr_widget.layout().addWidget(lbl_regex) # self.txt_replace_expr = QW.QLineEdit() self.txt_replace_expr.setPlaceholderText('Regex...') expr_widget.layout().addWidget(self.txt_replace_expr)",
"gb_preview.layout().setContentsMargins(2,2,2,2) gb_preview.layout().setSpacing(5) gb_preview.setTitle('Preview') self.layout().addWidget(gb_preview) # self.lv_preview = QW.QListWidget() gb_preview.layout().addWidget(self.lv_preview) # Button! self.btn_commit =",
"#end for # remove all non-matching elements result = [x for idx, x",
"self.layout().addWidget(gb_expression) # expr_widget = QW.QWidget() expr_widget.setLayout(QW.QHBoxLayout()) expr_widget.layout().setContentsMargins(2,2,2,2) expr_widget.layout().setSpacing(5) gb_expression.layout().addWidget(expr_widget) # lbl_regex = QW.QLabel()",
"if 'No' == dialog_result: return # undo chunk for all names mc.undoInfo(openChunk=True, chunkName='MaximumReplacer')",
"return self.update() #end def edit_done(self): print 'Editing done' #end def get_real_short_names(self, selected): result",
"idx not in to_remove] #end return result #end def calculate_regexed_names(self): pattern = self.txt_replace_expr.text()",
"changed return result #end def update(self): # 1. get the selection self.selected_items =",
"expression pattern = self.txt_filter_name.text() if len(pattern): to_remove = [] for idx in range(len(result)):",
"QC.Qt.AlignLeft) # update while typing checkbox self.chk_update_while_typing = QW.QCheckBox() self.chk_update_while_typing.setText('Update while typing') self.chk_update_while_typing.setChecked(True)",
"as to not break the hierarchy) for x in reversed(range(len(self.selected_items))): # ignore nodes",
"[] for x in self.selected_items: subbed_name = x[0] try: subbed_name = re.sub(pattern, subs,",
"QW.QDialog) return parent #end class MaximumReplacer(QW.QDialog): def __init__(self, parent=get_maya_window()): QW.QDialog.__init__(self, parent=parent) # [(short_name,",
"#end def calculate_regexed_names(self): pattern = self.txt_replace_expr.text() subs = self.txt_replace_subs.text() result = [] for",
"in self.regexed_items if x[1]]) dialog_msg = 'Confirm rename of ' + str(number_different) +",
"in reversed(range(len(self.selected_items))): # ignore nodes that don't need changing if not self.regexed_items[x][1]: continue",
"= [] # [(regexed_short_name, different_from_original), ...] maps 1-1 with the above in size",
"# all objects if self.rb_select_all.isChecked(): result = zip(self.get_real_short_names(False), mc.ls(long=True)) # selected objects if",
"filter by type filter_type = self.txt_filter_type.text() if len(filter_type): to_remove = [] for idx",
"changes bold_font = QG.QFont('', -1, QG.QFont.Bold, False) self.lv_preview.clear() for x in range(len(self.selected_items)): short_old",
"result #end def calculate_regexed_names(self): pattern = self.txt_replace_expr.text() subs = self.txt_replace_subs.text() result = []",
"g_maximum_replacer_inst = MaximumReplacer() g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose) g_maximum_replacer_inst.show() #end # uncomment this to run directly from",
"(e.g. transform)...') gb_selection.layout().addWidget(self.txt_filter_type) # Expression Section # gb_expression = QW.QGroupBox() gb_expression.setLayout(QW.QVBoxLayout()) gb_expression.layout().setContentsMargins(2,2,2,2) gb_expression.layout().setSpacing(5)",
"copyright! info_lbl = QW.QLabel() info_lbl.setTextFormat(QC.Qt.RichText) info_lbl.setOpenExternalLinks(True) info_lbl.setText('Maximum Replacer v1.2 <a href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>') footer_widget.layout().addWidget(info_lbl, 0,",
"len(filter_type): to_remove = [] for idx in range(len(result)): node_type = mc.nodeType(result[idx][1]) try: if",
"self.btn_commit = QW.QPushButton() self.btn_commit.setText('Commit') self.layout().addWidget(self.btn_commit) # footer footer_widget = QW.QWidget() footer_widget.setLayout(QW.QHBoxLayout()) footer_widget.layout().setContentsMargins(0,0,0,0) footer_widget.layout().setSpacing(5)",
"re import PySide2.QtCore as QC import PySide2.QtGui as QG import PySide2.QtWidgets as QW",
"QW.QPushButton() self.btn_commit.setText('Commit') self.layout().addWidget(self.btn_commit) # footer footer_widget = QW.QWidget() footer_widget.setLayout(QW.QHBoxLayout()) footer_widget.layout().setContentsMargins(0,0,0,0) footer_widget.layout().setSpacing(5) self.layout().addWidget(footer_widget) #",
"#end def get_selection(self, regex=None): result = [] # all objects if self.rb_select_all.isChecked(): result",
"in enumerate(result) if idx not in to_remove] #end # filter by expression pattern",
"subbed_name != x[0])) # (regex name, changed from original) except: result.append((subbed_name, False)) #",
"fails, so it's okay to fail) return result #end def get_selection(self, regex=None): result",
"to_remove.append(idx) except: continue #end for # remove all non-matching elements result = [x",
"node_type = mc.nodeType(result[idx][1]) try: if None == re.search(filter_type, node_type): to_remove.append(idx) except: continue #end",
"= re.sub(pattern, subs, x[0]) subbed_name = mel.eval('formValidObjectName(\\\"{0}\\\");'.format(subbed_name)) # make it maya-valid result.append((subbed_name, subbed_name",
"changing if not self.regexed_items[x][1]: continue old_name = self.selected_items[x][1] # old LONG name new_name",
"KasumiL5x import re import PySide2.QtCore as QC import PySide2.QtGui as QG import PySide2.QtWidgets",
"%s: %s' % (old_name, e) # end chunk! mc.undoInfo(closeChunk=True) # refresh view self.update()",
"subs_widget.layout().addWidget(lbl_subst) # self.txt_replace_subs = QW.QLineEdit() self.txt_replace_subs.setPlaceholderText('Substitute...') subs_widget.layout().addWidget(self.txt_replace_subs) # Preview Section # gb_preview =",
"= QW.QGroupBox() gb_preview.setLayout(QW.QVBoxLayout()) gb_preview.layout().setContentsMargins(2,2,2,2) gb_preview.layout().setSpacing(5) gb_preview.setTitle('Preview') self.layout().addWidget(gb_preview) # self.lv_preview = QW.QListWidget() gb_preview.layout().addWidget(self.lv_preview) #",
"result #end def get_selection(self, regex=None): result = [] # all objects if self.rb_select_all.isChecked():",
"gb_selection.layout().addWidget(self.txt_filter_type) # Expression Section # gb_expression = QW.QGroupBox() gb_expression.setLayout(QW.QVBoxLayout()) gb_expression.layout().setContentsMargins(2,2,2,2) gb_expression.layout().setSpacing(5) gb_expression.setTitle('Regular Expression')",
"dialog_result = mc.confirmDialog(title='Maximum Replacer', message=dialog_msg, button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No') if 'No' ==",
"QW.QLineEdit() self.txt_filter_name.setPlaceholderText('Pattern...') gb_selection.layout().addWidget(self.txt_filter_name) # self.txt_filter_type = QW.QLineEdit() self.txt_filter_type.setPlaceholderText('Type (e.g. transform)...') gb_selection.layout().addWidget(self.txt_filter_type) # Expression",
"def get_maya_window(): ptr = omui.MQtUtil.mainWindow() parent = shiboken2.wrapInstance(long(ptr), QW.QDialog) return parent #end class",
"idx, x in enumerate(result) if idx not in to_remove] #end return result #end",
"self.txt_filter_type.editingFinished.connect(self.on_text_edited) self.txt_replace_expr.editingFinished.connect(self.on_text_edited) self.txt_replace_subs.editingFinished.connect(self.on_text_edited) self.rb_select_all.clicked.connect(self.update) self.rb_select_sel.clicked.connect(self.update) self.btn_commit.clicked.connect(self.commit) # initial self.update() #end # called when",
"# 3. update list view with a preview of changes bold_font = QG.QFont('',",
"fields def on_text_changed(self): if not self.chk_update_while_typing.isChecked(): return self.update() #end # called when changes",
"fields (e.g. return pressed) def on_text_edited(self): if self.chk_update_while_typing.isChecked(): return self.update() #end def edit_done(self):",
"# GitHub: KasumiL5x import re import PySide2.QtCore as QC import PySide2.QtGui as QG",
"= [x for idx, x in enumerate(result) if idx not in to_remove] #end",
"= [] for x in mc.ls(sl=selected, shortNames=True): result.append(x[x.rfind('|')+1:]) # basically strip all after",
"hierarchy) for x in reversed(range(len(self.selected_items))): # ignore nodes that don't need changing if",
"QW.QRadioButton() self.rb_select_sel.setText('Selected') gb_selection.layout().addWidget(self.rb_select_sel) # self.txt_filter_name = QW.QLineEdit() self.txt_filter_name.setPlaceholderText('Pattern...') gb_selection.layout().addWidget(self.txt_filter_name) # self.txt_filter_type = QW.QLineEdit()",
"= 'Confirm rename of ' + str(number_different) + ' objects?' dialog_result = mc.confirmDialog(title='Maximum",
"gb_preview.layout().addWidget(self.lv_preview) # Button! self.btn_commit = QW.QPushButton() self.btn_commit.setText('Commit') self.layout().addWidget(self.btn_commit) # footer footer_widget = QW.QWidget()",
"self.chk_update_while_typing.isChecked(): return self.update() #end def edit_done(self): print 'Editing done' #end def get_real_short_names(self, selected):",
"to_remove = [] for idx in range(len(result)): try: if None == re.search(pattern, result[idx][0]):",
"import PySide2.QtWidgets as QW import shiboken2 import maya.cmds as mc import maya.mel as",
"to fail) return result #end def get_selection(self, regex=None): result = [] # all",
"' + str(number_different) + ' objects?' dialog_result = mc.confirmDialog(title='Maximum Replacer', message=dialog_msg, button=['Yes', 'No'],",
"self.selected_items = [] # [(regexed_short_name, different_from_original), ...] maps 1-1 with the above in",
"print 'Editing done' #end def get_real_short_names(self, selected): result = [] for x in",
"SHORT name try: mc.rename(old_name, new_name) except Exception as e: print 'Failed to rename",
"pattern = self.txt_filter_name.text() if len(pattern): to_remove = [] for idx in range(len(result)): try:",
"regex'd versions self.regexed_items = self.calculate_regexed_names() # 3. update list view with a preview",
"Preview Section # gb_preview = QW.QGroupBox() gb_preview.setLayout(QW.QVBoxLayout()) gb_preview.layout().setContentsMargins(2,2,2,2) gb_preview.layout().setSpacing(5) gb_preview.setTitle('Preview') self.layout().addWidget(gb_preview) # self.lv_preview",
"to_remove] #end # filter by expression pattern = self.txt_filter_name.text() if len(pattern): to_remove =",
"= self.txt_replace_expr.text() subs = self.txt_replace_subs.text() result = [] for x in self.selected_items: subbed_name",
"in enumerate(result) if idx not in to_remove] #end return result #end def calculate_regexed_names(self):",
"# <NAME>, 2019 # GitHub: KasumiL5x import re import PySide2.QtCore as QC import",
"x in enumerate(result) if idx not in to_remove] #end # filter by expression",
"= QW.QLineEdit() self.txt_filter_type.setPlaceholderText('Type (e.g. transform)...') gb_selection.layout().addWidget(self.txt_filter_type) # Expression Section # gb_expression = QW.QGroupBox()",
"for idx, x in enumerate(result) if idx not in to_remove] #end return result",
"pressed) def on_text_edited(self): if self.chk_update_while_typing.isChecked(): return self.update() #end def edit_done(self): print 'Editing done'",
"+ ' => ' + short_new else: txt = short_old self.lv_preview.addItem(txt) if self.regexed_items[x][1]:",
"footer footer_widget = QW.QWidget() footer_widget.setLayout(QW.QHBoxLayout()) footer_widget.layout().setContentsMargins(0,0,0,0) footer_widget.layout().setSpacing(5) self.layout().addWidget(footer_widget) # copyright! info_lbl = QW.QLabel()",
"in mc.ls(sl=selected, shortNames=True): result.append(x[x.rfind('|')+1:]) # basically strip all after last | (the |+1",
"= short_old + ' => ' + short_new else: txt = short_old self.lv_preview.addItem(txt)",
"that don't need changing if not self.regexed_items[x][1]: continue old_name = self.selected_items[x][1] # old",
"(old_name, e) # end chunk! mc.undoInfo(closeChunk=True) # refresh view self.update() #end #end def",
"= QW.QLabel() info_lbl.setTextFormat(QC.Qt.RichText) info_lbl.setOpenExternalLinks(True) info_lbl.setText('Maximum Replacer v1.2 <a href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>') footer_widget.layout().addWidget(info_lbl, 0, QC.Qt.AlignLeft) #",
"txt = short_old self.lv_preview.addItem(txt) if self.regexed_items[x][1]: self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font) #end def commit(self): # safety check",
"return result #end def get_selection(self, regex=None): result = [] # all objects if",
"e) # end chunk! mc.undoInfo(closeChunk=True) # refresh view self.update() #end #end def create():",
"mc.ls(sl=True, long=True)) # filter by type filter_type = self.txt_filter_type.text() if len(filter_type): to_remove =",
"v1.2 <a href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>') footer_widget.layout().addWidget(info_lbl, 0, QC.Qt.AlignLeft) # update while typing checkbox self.chk_update_while_typing =",
"QC import PySide2.QtGui as QG import PySide2.QtWidgets as QW import shiboken2 import maya.cmds",
"= QW.QPushButton() self.btn_commit.setText('Commit') self.layout().addWidget(self.btn_commit) # footer footer_widget = QW.QWidget() footer_widget.setLayout(QW.QHBoxLayout()) footer_widget.layout().setContentsMargins(0,0,0,0) footer_widget.layout().setSpacing(5) self.layout().addWidget(footer_widget)",
"a preview of changes bold_font = QG.QFont('', -1, QG.QFont.Bold, False) self.lv_preview.clear() for x",
"gb_expression.layout().setSpacing(5) gb_expression.setTitle('Regular Expression') self.layout().addWidget(gb_expression) # expr_widget = QW.QWidget() expr_widget.setLayout(QW.QHBoxLayout()) expr_widget.layout().setContentsMargins(2,2,2,2) expr_widget.layout().setSpacing(5) gb_expression.layout().addWidget(expr_widget) #",
"Selection Section # gb_selection = QW.QGroupBox() gb_selection.setLayout(QW.QHBoxLayout()) gb_selection.layout().setContentsMargins(2,2,2,2) gb_selection.layout().setSpacing(5) gb_selection.setTitle('Selection') self.layout().addWidget(gb_selection) # self.rb_select_all",
"# 1. get the selection self.selected_items = self.get_selection() # 2. get the regex'd",
"preview of changes bold_font = QG.QFont('', -1, QG.QFont.Bold, False) self.lv_preview.clear() for x in",
"def calculate_regexed_names(self): pattern = self.txt_replace_expr.text() subs = self.txt_replace_subs.text() result = [] for x",
"chunkName='MaximumReplacer') # rename all objects (in REVERSE order as to not break the",
"the selection self.selected_items = self.get_selection() # 2. get the regex'd versions self.regexed_items =",
"self.txt_filter_name.editingFinished.connect(self.on_text_edited) self.txt_filter_type.editingFinished.connect(self.on_text_edited) self.txt_replace_expr.editingFinished.connect(self.on_text_edited) self.txt_replace_subs.editingFinished.connect(self.on_text_edited) self.rb_select_all.clicked.connect(self.update) self.rb_select_sel.clicked.connect(self.update) self.btn_commit.clicked.connect(self.commit) # initial self.update() #end # called",
"5, 5) self.layout().setSpacing(5) self.layout().setAlignment(QC.Qt.AlignTop) # Selection Section # gb_selection = QW.QGroupBox() gb_selection.setLayout(QW.QHBoxLayout()) gb_selection.layout().setContentsMargins(2,2,2,2)",
"1-1 with the above in size self.regexed_items = [] self.setWindowFlags(QC.Qt.Window) self.setWindowTitle('Maximum Replacer') self.setMinimumWidth(380)",
"been committed in text fields (e.g. return pressed) def on_text_edited(self): if self.chk_update_while_typing.isChecked(): return",
"if self.regexed_items[x][1]: txt = short_old + ' => ' + short_new else: txt",
"self.txt_filter_name.textChanged.connect(self.on_text_changed) self.txt_filter_type.textChanged.connect(self.on_text_changed) self.txt_replace_expr.textChanged.connect(self.on_text_changed) self.txt_replace_subs.textChanged.connect(self.on_text_changed) self.txt_filter_name.editingFinished.connect(self.on_text_edited) self.txt_filter_type.editingFinished.connect(self.on_text_edited) self.txt_replace_expr.editingFinished.connect(self.on_text_edited) self.txt_replace_subs.editingFinished.connect(self.on_text_edited) self.rb_select_all.clicked.connect(self.update) self.rb_select_sel.clicked.connect(self.update) self.btn_commit.clicked.connect(self.commit) # initial",
"self.layout().setAlignment(QC.Qt.AlignTop) # Selection Section # gb_selection = QW.QGroupBox() gb_selection.setLayout(QW.QHBoxLayout()) gb_selection.layout().setContentsMargins(2,2,2,2) gb_selection.layout().setSpacing(5) gb_selection.setTitle('Selection') self.layout().addWidget(gb_selection)",
"gb_selection.setTitle('Selection') self.layout().addWidget(gb_selection) # self.rb_select_all = QW.QRadioButton() self.rb_select_all.setText('All') self.rb_select_all.setChecked(True) gb_selection.layout().addWidget(self.rb_select_all) # self.rb_select_sel = QW.QRadioButton()",
"update(self): # 1. get the selection self.selected_items = self.get_selection() # 2. get the",
"all names mc.undoInfo(openChunk=True, chunkName='MaximumReplacer') # rename all objects (in REVERSE order as to",
"0 if the find fails, so it's okay to fail) return result #end",
"# self.rb_select_sel = QW.QRadioButton() self.rb_select_sel.setText('Selected') gb_selection.layout().addWidget(self.rb_select_sel) # self.txt_filter_name = QW.QLineEdit() self.txt_filter_name.setPlaceholderText('Pattern...') gb_selection.layout().addWidget(self.txt_filter_name) #",
"x in enumerate(result) if idx not in to_remove] #end return result #end def",
"or None == self.regexed_items: return # confirm dialog number_different = len([x for x",
"= QG.QFont('', -1, QG.QFont.Bold, False) self.lv_preview.clear() for x in range(len(self.selected_items)): short_old = self.selected_items[x][0]",
"self.setMinimumHeight(400) self.setLayout(QW.QVBoxLayout()) self.layout().setContentsMargins(5, 5, 5, 5) self.layout().setSpacing(5) self.layout().setAlignment(QC.Qt.AlignTop) # Selection Section # gb_selection",
"= self.selected_items[x][1] # old LONG name new_name = self.regexed_items[x][0] # new SHORT name",
"for x in mc.ls(sl=selected, shortNames=True): result.append(x[x.rfind('|')+1:]) # basically strip all after last |",
"cancelButton='No', dismissString='No') if 'No' == dialog_result: return # undo chunk for all names",
"subs_widget.layout().addWidget(self.txt_replace_subs) # Preview Section # gb_preview = QW.QGroupBox() gb_preview.setLayout(QW.QVBoxLayout()) gb_preview.layout().setContentsMargins(2,2,2,2) gb_preview.layout().setSpacing(5) gb_preview.setTitle('Preview') self.layout().addWidget(gb_preview)",
"= mc.confirmDialog(title='Maximum Replacer', message=dialog_msg, button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No') if 'No' == dialog_result:",
"typing checkbox self.chk_update_while_typing = QW.QCheckBox() self.chk_update_while_typing.setText('Update while typing') self.chk_update_while_typing.setChecked(True) footer_widget.layout().addWidget(self.chk_update_while_typing, 0, QC.Qt.AlignRight) #",
"not self.regexed_items[x][1]: continue old_name = self.selected_items[x][1] # old LONG name new_name = self.regexed_items[x][0]",
"None == re.search(pattern, result[idx][0]): to_remove.append(idx) except: continue #end for # remove all non-matching",
"through data and make it not changed return result #end def update(self): #",
"omui.MQtUtil.mainWindow() parent = shiboken2.wrapInstance(long(ptr), QW.QDialog) return parent #end class MaximumReplacer(QW.QDialog): def __init__(self, parent=get_maya_window()):",
"as mc import maya.mel as mel import maya.OpenMayaUI as omui def get_maya_window(): ptr",
"expr_widget.setLayout(QW.QHBoxLayout()) expr_widget.layout().setContentsMargins(2,2,2,2) expr_widget.layout().setSpacing(5) gb_expression.layout().addWidget(expr_widget) # lbl_regex = QW.QLabel() lbl_regex.setText('Pattern') expr_widget.layout().addWidget(lbl_regex) # self.txt_replace_expr =",
"strip all after last | (the |+1 becomes 0 if the find fails,",
"global g_maximum_replacer_inst try: g_maximum_replacer_inst.close() g_maximum_replacer_inst.deleteLater() except: pass g_maximum_replacer_inst = MaximumReplacer() g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose) g_maximum_replacer_inst.show() #end",
"to not break the hierarchy) for x in reversed(range(len(self.selected_items))): # ignore nodes that",
"footer_widget.layout().setContentsMargins(0,0,0,0) footer_widget.layout().setSpacing(5) self.layout().addWidget(footer_widget) # copyright! info_lbl = QW.QLabel() info_lbl.setTextFormat(QC.Qt.RichText) info_lbl.setOpenExternalLinks(True) info_lbl.setText('Maximum Replacer v1.2",
"old_name = self.selected_items[x][1] # old LONG name new_name = self.regexed_items[x][0] # new SHORT",
"calculate_regexed_names(self): pattern = self.txt_replace_expr.text() subs = self.txt_replace_subs.text() result = [] for x in",
"idx, x in enumerate(result) if idx not in to_remove] #end # filter by",
"= QW.QRadioButton() self.rb_select_sel.setText('Selected') gb_selection.layout().addWidget(self.rb_select_sel) # self.txt_filter_name = QW.QLineEdit() self.txt_filter_name.setPlaceholderText('Pattern...') gb_selection.layout().addWidget(self.txt_filter_name) # self.txt_filter_type =",
"len([x for x in self.regexed_items if x[1]]) dialog_msg = 'Confirm rename of '",
"Expression') self.layout().addWidget(gb_expression) # expr_widget = QW.QWidget() expr_widget.setLayout(QW.QHBoxLayout()) expr_widget.layout().setContentsMargins(2,2,2,2) expr_widget.layout().setSpacing(5) gb_expression.layout().addWidget(expr_widget) # lbl_regex =",
"# self.txt_filter_name = QW.QLineEdit() self.txt_filter_name.setPlaceholderText('Pattern...') gb_selection.layout().addWidget(self.txt_filter_name) # self.txt_filter_type = QW.QLineEdit() self.txt_filter_type.setPlaceholderText('Type (e.g. transform)...')",
"as e: print 'Failed to rename %s: %s' % (old_name, e) # end",
"objects if self.rb_select_sel.isChecked(): result = zip(self.get_real_short_names(True), mc.ls(sl=True, long=True)) # filter by type filter_type",
"if None == self.selected_items or None == self.regexed_items: return # confirm dialog number_different",
"short_old + ' => ' + short_new else: txt = short_old self.lv_preview.addItem(txt) if",
"' + short_new else: txt = short_old self.lv_preview.addItem(txt) if self.regexed_items[x][1]: self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font) #end def",
"for # remove all non-matching elements result = [x for idx, x in",
"if idx not in to_remove] #end return result #end def calculate_regexed_names(self): pattern =",
"= omui.MQtUtil.mainWindow() parent = shiboken2.wrapInstance(long(ptr), QW.QDialog) return parent #end class MaximumReplacer(QW.QDialog): def __init__(self,",
"dismissString='No') if 'No' == dialog_result: return # undo chunk for all names mc.undoInfo(openChunk=True,",
"% (old_name, e) # end chunk! mc.undoInfo(closeChunk=True) # refresh view self.update() #end #end",
"g_maximum_replacer_inst.show() #end # uncomment this to run directly from the script editor (or",
"all non-matching elements result = [x for idx, x in enumerate(result) if idx",
"text fields (e.g. return pressed) def on_text_edited(self): if self.chk_update_while_typing.isChecked(): return self.update() #end def",
"# gb_preview = QW.QGroupBox() gb_preview.setLayout(QW.QVBoxLayout()) gb_preview.layout().setContentsMargins(2,2,2,2) gb_preview.layout().setSpacing(5) gb_preview.setTitle('Preview') self.layout().addWidget(gb_preview) # self.lv_preview = QW.QListWidget()",
"= self.txt_filter_type.text() if len(filter_type): to_remove = [] for idx in range(len(result)): node_type =",
"for idx in range(len(result)): node_type = mc.nodeType(result[idx][1]) try: if None == re.search(filter_type, node_type):",
"parent=get_maya_window()): QW.QDialog.__init__(self, parent=parent) # [(short_name, long_name), ...] self.selected_items = [] # [(regexed_short_name, different_from_original),",
"self.setWindowFlags(QC.Qt.Window) self.setWindowTitle('Maximum Replacer') self.setMinimumWidth(380) self.setMinimumHeight(400) self.setLayout(QW.QVBoxLayout()) self.layout().setContentsMargins(5, 5, 5, 5) self.layout().setSpacing(5) self.layout().setAlignment(QC.Qt.AlignTop) #",
"# self.txt_replace_expr = QW.QLineEdit() self.txt_replace_expr.setPlaceholderText('Regex...') expr_widget.layout().addWidget(self.txt_replace_expr) # subs_widget = QW.QWidget() subs_widget.setLayout(QW.QHBoxLayout()) subs_widget.layout().setContentsMargins(2,2,2,2) subs_widget.layout().setSpacing(5)",
"# lbl_subst = QW.QLabel() lbl_subst.setText('Substitute') subs_widget.layout().addWidget(lbl_subst) # self.txt_replace_subs = QW.QLineEdit() self.txt_replace_subs.setPlaceholderText('Substitute...') subs_widget.layout().addWidget(self.txt_replace_subs) #",
"g_maximum_replacer_inst try: g_maximum_replacer_inst.close() g_maximum_replacer_inst.deleteLater() except: pass g_maximum_replacer_inst = MaximumReplacer() g_maximum_replacer_inst.setAttribute(QC.Qt.WA_DeleteOnClose) g_maximum_replacer_inst.show() #end #",
"# (regex name, changed from original) except: result.append((subbed_name, False)) # failed so just",
"# self.txt_filter_type = QW.QLineEdit() self.txt_filter_type.setPlaceholderText('Type (e.g. transform)...') gb_selection.layout().addWidget(self.txt_filter_type) # Expression Section # gb_expression",
"try: if None == re.search(filter_type, node_type): to_remove.append(idx) except: continue #end for # remove",
"= QW.QGroupBox() gb_selection.setLayout(QW.QHBoxLayout()) gb_selection.layout().setContentsMargins(2,2,2,2) gb_selection.layout().setSpacing(5) gb_selection.setTitle('Selection') self.layout().addWidget(gb_selection) # self.rb_select_all = QW.QRadioButton() self.rb_select_all.setText('All') self.rb_select_all.setChecked(True)",
"+ str(number_different) + ' objects?' dialog_result = mc.confirmDialog(title='Maximum Replacer', message=dialog_msg, button=['Yes', 'No'], defaultButton='Yes',",
"filter by expression pattern = self.txt_filter_name.text() if len(pattern): to_remove = [] for idx",
"+ ' objects?' dialog_result = mc.confirmDialog(title='Maximum Replacer', message=dialog_msg, button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No')",
"self.update() #end # called when any text changes in text fields def on_text_changed(self):",
"it maya-valid result.append((subbed_name, subbed_name != x[0])) # (regex name, changed from original) except:",
"new_name) except Exception as e: print 'Failed to rename %s: %s' % (old_name,",
"e: print 'Failed to rename %s: %s' % (old_name, e) # end chunk!",
"self.regexed_items = [] self.setWindowFlags(QC.Qt.Window) self.setWindowTitle('Maximum Replacer') self.setMinimumWidth(380) self.setMinimumHeight(400) self.setLayout(QW.QVBoxLayout()) self.layout().setContentsMargins(5, 5, 5, 5)",
"self.rb_select_all.setChecked(True) gb_selection.layout().addWidget(self.rb_select_all) # self.rb_select_sel = QW.QRadioButton() self.rb_select_sel.setText('Selected') gb_selection.layout().addWidget(self.rb_select_sel) # self.txt_filter_name = QW.QLineEdit() self.txt_filter_name.setPlaceholderText('Pattern...')",
"#end #end def create(): global g_maximum_replacer_inst try: g_maximum_replacer_inst.close() g_maximum_replacer_inst.deleteLater() except: pass g_maximum_replacer_inst =",
"if not self.chk_update_while_typing.isChecked(): return self.update() #end # called when changes have been committed",
"#end return result #end def calculate_regexed_names(self): pattern = self.txt_replace_expr.text() subs = self.txt_replace_subs.text() result",
"long=True)) # filter by type filter_type = self.txt_filter_type.text() if len(filter_type): to_remove = []",
"if x[1]]) dialog_msg = 'Confirm rename of ' + str(number_different) + ' objects?'",
"self.rb_select_sel.setText('Selected') gb_selection.layout().addWidget(self.rb_select_sel) # self.txt_filter_name = QW.QLineEdit() self.txt_filter_name.setPlaceholderText('Pattern...') gb_selection.layout().addWidget(self.txt_filter_name) # self.txt_filter_type = QW.QLineEdit() self.txt_filter_type.setPlaceholderText('Type",
"# refresh view self.update() #end #end def create(): global g_maximum_replacer_inst try: g_maximum_replacer_inst.close() g_maximum_replacer_inst.deleteLater()",
"result.append((subbed_name, False)) # failed so just pass through data and make it not",
"gb_preview.setLayout(QW.QVBoxLayout()) gb_preview.layout().setContentsMargins(2,2,2,2) gb_preview.layout().setSpacing(5) gb_preview.setTitle('Preview') self.layout().addWidget(gb_preview) # self.lv_preview = QW.QListWidget() gb_preview.layout().addWidget(self.lv_preview) # Button! self.btn_commit",
"list view with a preview of changes bold_font = QG.QFont('', -1, QG.QFont.Bold, False)",
"x in mc.ls(sl=selected, shortNames=True): result.append(x[x.rfind('|')+1:]) # basically strip all after last | (the",
"#end # called when changes have been committed in text fields (e.g. return",
"not break the hierarchy) for x in reversed(range(len(self.selected_items))): # ignore nodes that don't",
"gb_selection.layout().addWidget(self.txt_filter_name) # self.txt_filter_type = QW.QLineEdit() self.txt_filter_type.setPlaceholderText('Type (e.g. transform)...') gb_selection.layout().addWidget(self.txt_filter_type) # Expression Section #",
"when changes have been committed in text fields (e.g. return pressed) def on_text_edited(self):",
"except Exception as e: print 'Failed to rename %s: %s' % (old_name, e)",
"basically strip all after last | (the |+1 becomes 0 if the find",
"new SHORT name try: mc.rename(old_name, new_name) except Exception as e: print 'Failed to",
"#end # filter by expression pattern = self.txt_filter_name.text() if len(pattern): to_remove = []",
"expr_widget = QW.QWidget() expr_widget.setLayout(QW.QHBoxLayout()) expr_widget.layout().setContentsMargins(2,2,2,2) expr_widget.layout().setSpacing(5) gb_expression.layout().addWidget(expr_widget) # lbl_regex = QW.QLabel() lbl_regex.setText('Pattern') expr_widget.layout().addWidget(lbl_regex)",
"QW.QLineEdit() self.txt_replace_subs.setPlaceholderText('Substitute...') subs_widget.layout().addWidget(self.txt_replace_subs) # Preview Section # gb_preview = QW.QGroupBox() gb_preview.setLayout(QW.QVBoxLayout()) gb_preview.layout().setContentsMargins(2,2,2,2) gb_preview.layout().setSpacing(5)",
"self.layout().addWidget(footer_widget) # copyright! info_lbl = QW.QLabel() info_lbl.setTextFormat(QC.Qt.RichText) info_lbl.setOpenExternalLinks(True) info_lbl.setText('Maximum Replacer v1.2 <a href=\\\"http://www.dgreen.me/\\\">www.dgreen.me</a>')",
"import maya.OpenMayaUI as omui def get_maya_window(): ptr = omui.MQtUtil.mainWindow() parent = shiboken2.wrapInstance(long(ptr), QW.QDialog)",
"changed from original) except: result.append((subbed_name, False)) # failed so just pass through data",
"if None == re.search(filter_type, node_type): to_remove.append(idx) except: continue #end for # remove all",
"result = [] for x in self.selected_items: subbed_name = x[0] try: subbed_name =",
"short_old = self.selected_items[x][0] short_new = self.regexed_items[x][0] if self.regexed_items[x][1]: txt = short_old + '",
"text fields def on_text_changed(self): if not self.chk_update_while_typing.isChecked(): return self.update() #end # called when",
"# self.lv_preview = QW.QListWidget() gb_preview.layout().addWidget(self.lv_preview) # Button! self.btn_commit = QW.QPushButton() self.btn_commit.setText('Commit') self.layout().addWidget(self.btn_commit) #",
"re.search(pattern, result[idx][0]): to_remove.append(idx) except: continue #end for # remove all non-matching elements result",
"run directly from the script editor (or call it from a shelf) #",
"footer_widget.layout().addWidget(info_lbl, 0, QC.Qt.AlignLeft) # update while typing checkbox self.chk_update_while_typing = QW.QCheckBox() self.chk_update_while_typing.setText('Update while",
"defaultButton='Yes', cancelButton='No', dismissString='No') if 'No' == dialog_result: return # undo chunk for all",
"gb_expression = QW.QGroupBox() gb_expression.setLayout(QW.QVBoxLayout()) gb_expression.layout().setContentsMargins(2,2,2,2) gb_expression.layout().setSpacing(5) gb_expression.setTitle('Regular Expression') self.layout().addWidget(gb_expression) # expr_widget = QW.QWidget()",
"self.selected_items[x][0] short_new = self.regexed_items[x][0] if self.regexed_items[x][1]: txt = short_old + ' => '",
"range(len(self.selected_items)): short_old = self.selected_items[x][0] short_new = self.regexed_items[x][0] if self.regexed_items[x][1]: txt = short_old +",
"return result #end def calculate_regexed_names(self): pattern = self.txt_replace_expr.text() subs = self.txt_replace_subs.text() result =",
"in range(len(result)): try: if None == re.search(pattern, result[idx][0]): to_remove.append(idx) except: continue #end for",
"the hierarchy) for x in reversed(range(len(self.selected_items))): # ignore nodes that don't need changing",
"= self.get_selection() # 2. get the regex'd versions self.regexed_items = self.calculate_regexed_names() # 3.",
"else: txt = short_old self.lv_preview.addItem(txt) if self.regexed_items[x][1]: self.lv_preview.item(self.lv_preview.count()-1).setFont(bold_font) #end def commit(self): # safety",
"= shiboken2.wrapInstance(long(ptr), QW.QDialog) return parent #end class MaximumReplacer(QW.QDialog): def __init__(self, parent=get_maya_window()): QW.QDialog.__init__(self, parent=parent)",
"checkbox self.chk_update_while_typing = QW.QCheckBox() self.chk_update_while_typing.setText('Update while typing') self.chk_update_while_typing.setChecked(True) footer_widget.layout().addWidget(self.chk_update_while_typing, 0, QC.Qt.AlignRight) # connections",
"def on_text_changed(self): if not self.chk_update_while_typing.isChecked(): return self.update() #end # called when changes have",
"range(len(result)): node_type = mc.nodeType(result[idx][1]) try: if None == re.search(filter_type, node_type): to_remove.append(idx) except: continue",
"have been committed in text fields (e.g. return pressed) def on_text_edited(self): if self.chk_update_while_typing.isChecked():",
"self.txt_replace_subs.textChanged.connect(self.on_text_changed) self.txt_filter_name.editingFinished.connect(self.on_text_edited) self.txt_filter_type.editingFinished.connect(self.on_text_edited) self.txt_replace_expr.editingFinished.connect(self.on_text_edited) self.txt_replace_subs.editingFinished.connect(self.on_text_edited) self.rb_select_all.clicked.connect(self.update) self.rb_select_sel.clicked.connect(self.update) self.btn_commit.clicked.connect(self.commit) # initial self.update() #end #",
"if None == re.search(pattern, result[idx][0]): to_remove.append(idx) except: continue #end for # remove all"
] |
[
"Generated by Django 1.11.13 on 2019-04-04 01:37 from django.db import migrations, models class",
"Django 1.11.13 on 2019-04-04 01:37 from django.db import migrations, models class Migration(migrations.Migration): dependencies",
"= [ migrations.AlterField( model_name=\"advice\", name=\"subject\", field=models.CharField( blank=True, max_length=100, null=True, verbose_name=\"Subject\" ), ) ]",
"[(\"advicer\", \"0016_auto_20190404_0320\")] operations = [ migrations.AlterField( model_name=\"advice\", name=\"subject\", field=models.CharField( blank=True, max_length=100, null=True, verbose_name=\"Subject\"",
"by Django 1.11.13 on 2019-04-04 01:37 from django.db import migrations, models class Migration(migrations.Migration):",
"01:37 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"advicer\", \"0016_auto_20190404_0320\")] operations",
"on 2019-04-04 01:37 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"advicer\",",
"= [(\"advicer\", \"0016_auto_20190404_0320\")] operations = [ migrations.AlterField( model_name=\"advice\", name=\"subject\", field=models.CharField( blank=True, max_length=100, null=True,",
"class Migration(migrations.Migration): dependencies = [(\"advicer\", \"0016_auto_20190404_0320\")] operations = [ migrations.AlterField( model_name=\"advice\", name=\"subject\", field=models.CharField(",
"2019-04-04 01:37 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"advicer\", \"0016_auto_20190404_0320\")]",
"from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"advicer\", \"0016_auto_20190404_0320\")] operations =",
"1.11.13 on 2019-04-04 01:37 from django.db import migrations, models class Migration(migrations.Migration): dependencies =",
"import migrations, models class Migration(migrations.Migration): dependencies = [(\"advicer\", \"0016_auto_20190404_0320\")] operations = [ migrations.AlterField(",
"\"0016_auto_20190404_0320\")] operations = [ migrations.AlterField( model_name=\"advice\", name=\"subject\", field=models.CharField( blank=True, max_length=100, null=True, verbose_name=\"Subject\" ),",
"operations = [ migrations.AlterField( model_name=\"advice\", name=\"subject\", field=models.CharField( blank=True, max_length=100, null=True, verbose_name=\"Subject\" ), )",
"migrations, models class Migration(migrations.Migration): dependencies = [(\"advicer\", \"0016_auto_20190404_0320\")] operations = [ migrations.AlterField( model_name=\"advice\",",
"# Generated by Django 1.11.13 on 2019-04-04 01:37 from django.db import migrations, models",
"models class Migration(migrations.Migration): dependencies = [(\"advicer\", \"0016_auto_20190404_0320\")] operations = [ migrations.AlterField( model_name=\"advice\", name=\"subject\",",
"django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"advicer\", \"0016_auto_20190404_0320\")] operations = [",
"dependencies = [(\"advicer\", \"0016_auto_20190404_0320\")] operations = [ migrations.AlterField( model_name=\"advice\", name=\"subject\", field=models.CharField( blank=True, max_length=100,",
"Migration(migrations.Migration): dependencies = [(\"advicer\", \"0016_auto_20190404_0320\")] operations = [ migrations.AlterField( model_name=\"advice\", name=\"subject\", field=models.CharField( blank=True,"
] |
[
"4.5, 4, 850], [88, 5.5, 4, 850], [80, 5.5, 7, 300], [63, 6,",
"khoảng cách nhà tới hồ gươm(m) ') one = np.ones((X.shape[0], 1)) Xbar =",
"850], [88, 4.5, 5, 850], [89, 4.5, 4, 850], [88, 5.5, 4, 850],",
"ax4.plot(Y, X[:, 3]) ax4.set_title('xét khoảng cách với giá tiền') ax4.set_xlabel('giá tiền') ax4.set_ylabel('khoảng cách",
"x3 là số tầng nhà # x4 là khoảng cách tới hồ gươm",
"6, 300], [81, 4.5, 6, 300], [88, 3.5, 4, 850], [88, 4.5, 3,",
"1800], [44, 4, 5, 350], [41, 9, 2, 1800], [37, 4.5, 6, 450],",
"3, 1800], [44, 4, 5, 350], [41, 9, 2, 1800], [37, 4.5, 6,",
"với giá tiền') ax3.set_xlabel('giá tiền') ax3.set_ylabel('số tầng nhà') ax4.plot(Y, X[:, 3]) ax4.set_title('xét khoảng",
"20.5, 20.52, 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22, 22.5, 29, 30,",
"32, 32, 32.3, 32.3, 32.37, 32.4, 32.5, 32.65, 32.7, 33, 33, 33, 33.5,",
"chiều dài mặt tiền(m) ') tn_name = st.sidebar.text_input('Nhập số tầng nhà(tầng) ') kc_name",
"4.3, 5, 450], [62, 7, 1, 800], [63, 6, 1, 800], [31.2, 4,",
"31.3, 31.35, 31.5, 31.5, 31.63, 31.7, 32, 32, 32, 32, 32, 32.3, 32.3,",
"cd_name, tn_name, kc_name, 1]) if st.sidebar.button('Dự đoán'): y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \\ float(tn_name)+w_4*float(kc_name)",
"nhà') dt_name = st.sidebar.text_input('Nhập diện tích đất(m2) ') cd_name = st.sidebar.text_input('Nhập chiều dài",
"1800], [36, 4.5, 7, 450], [40, 9, 3, 1800], [44, 4, 5, 350],",
"4.5, 3, 900], [86, 6, 5, 950], [60, 4.3, 5, 450], [62, 7,",
"1, 800], [62, 5, 1, 800], [85, 4, 6, 950], [84, 6, 5,",
"Y)) st.sidebar.title('Dự đoán giá các mẫu nhà') dt_name = st.sidebar.text_input('Nhập diện tích đất(m2)",
"hồ gươm(m) ') one = np.ones((X.shape[0], 1)) Xbar = np.concatenate((one, X), axis=1) x_train,",
"mặt tiền với giá tiền') ax2.set_xlabel('giá tiền') ax2.set_ylabel('số mét mặt tiền') return fig",
"96.5, 97.3, 97.5, 98, 98.5, 98.7, 99.5 ]]).T def duel_plot(X1, X2, Y): fig",
"chính xác (R2 square) : \", r2_score(y_test, np.dot(x_test, w))) vd = np.array([dt_name, cd_name,",
"9, 2, 1800], [40, 9, 1, 1800], [36, 4.5, 5, 450], [36, 4.5,",
"[87, 3.5, 3, 900], [30.2, 4, 4, 450], [62, 6, 3, 800], [86,",
"5, 950], [60, 3.3, 5, 450], [61, 6, 1, 800], [62, 5, 1,",
"Xbar = np.concatenate((one, X), axis=1) x_train, x_test, y_train, y_test = train_test_split(Xbar, Y, test_size=0.2)",
"X = np.array([[40, 8, 2, 1800], [36, 3.5, 6, 450], [35, 4.5, 6,",
"800], [85, 4, 6, 950], [84, 6, 5, 950], [86, 2.5, 3, 900],",
"5, 950], [86, 2.5, 3, 900], [60, 3.3, 6, 450], [85, 5, 5,",
"matplotlib.pyplot as plt st.title('Mô hình dự đoán giá nhà đất tại hồ gươm",
"from __future__ import division, print_function, unicode_literals import streamlit as st from sklearn.metrics import",
"2) ax3.plot(Y, X[:, 2]) ax3.set_title('xét số tầng nhà với giá tiền') ax3.set_xlabel('giá tiền')",
"[63, 7, 3, 250], [63, 7, 4, 250], [63, 7, 5, 250], [64,",
"950], [60, 3.3, 5, 450], [61, 6, 1, 800], [62, 5, 1, 800],",
"450], [36, 5.5, 6, 450], [40, 10, 2, 1800], [45, 3, 4, 350],",
"[62, 6, 3, 800], [86, 4.5, 3, 900], [86, 6, 5, 950], [60,",
"500], [139, 5.5, 5, 500], [140, 5.5, 4, 500], [140, 5.5, 5, 500],",
"9, 3, 1800], [44, 4, 5, 350], [41, 9, 2, 1800], [37, 4.5,",
"mean_squared_error, r2_score from sklearn.model_selection import train_test_split import pandas as pd import numpy as",
"44.1, 50, 52.3, 53, 53.38, 53.62, 54, 54.5, 55, 55.46, 55.5, 55.54, 56,",
"= w[1][0] w_2 = w[2][0] w_3 = w[3][0] w_4 = w[4][0] st.write(\"Độ chính",
"950], [84, 6, 5, 950], [86, 2.5, 3, 900], [60, 3.3, 6, 450],",
"4, 5, 550], [63, 5, 6 , 550], [63, 6, 4, 550], [80,",
"fig = plt.figure(figsize=(15, 5)) ax1 = fig.add_subplot(1, 2, 1) ax2 = fig.add_subplot(1, 2,",
"6, 5, 950], [86, 2.5, 3, 900], [60, 3.3, 6, 450], [85, 5,",
"4, 250], [63, 7, 5, 250], [64, 7, 4, 250], [63, 8, 4,",
"[60, 2.3, 5, 450], [59, 3.3, 5, 450], [60, 3.3, 4, 450], [85,",
"19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5, 20.5, 20.52, 20.55, 20.7,",
"= st.sidebar.text_input('Nhập số tầng nhà(tầng) ') kc_name = st.sidebar.text_input('Nhập khoảng cách nhà tới",
"np.array([dt_name, cd_name, tn_name, kc_name, 1]) if st.sidebar.button('Dự đoán'): y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \\",
"5, 550], [32.2, 4 , 4, 450], [31.2, 5, 4, 450], [63, 5,",
"5, 550], [31.2, 4, 5, 450], [63, 5, 3, 550], [63, 4, 5,",
"nhà với giá tiền') ax3.set_xlabel('giá tiền') ax3.set_ylabel('số tầng nhà') ax4.plot(Y, X[:, 3]) ax4.set_title('xét",
"# x3 là số tầng nhà # x4 là khoảng cách tới hồ",
"3, 900], [60, 3.3, 6, 450], [85, 5, 5, 950], [85, 3.5, 3,",
"4, 5, 450], [63, 5, 3, 550], [63, 4, 5, 550], [32.2, 4",
"division, print_function, unicode_literals import streamlit as st from sklearn.metrics import mean_squared_error, r2_score from",
"gươm (m) X = np.array([[40, 8, 2, 1800], [36, 3.5, 6, 450], [35,",
"khoảng cách tới hồ gươm (m) X = np.array([[40, 8, 2, 1800], [36,",
"[140, 5.5, 4, 500], [140, 5.5, 5, 500], [140, 5.5, 6, 500], [141,",
"hồ gươm (m) X = np.array([[40, 8, 2, 1800], [36, 3.5, 6, 450],",
"800], [85, 6, 5, 950], [86, 3.5, 3, 900], [62, 6, 2, 800],",
"5, 450], [62, 7, 1, 800], [63, 6, 1, 800], [31.2, 4, 4,",
"63.5, 63.7, 66, 96.5, 97.3, 97.5, 98, 98.5, 98.7, 99.5 ]]).T def duel_plot(X1,",
"6, 450], [40, 10, 2, 1800], [45, 3, 4, 350], [45, 4, 3,",
"st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(duel_plot(X[:, 0], X[:, 1], Y)) st.pyplot(duel_plot2(X[:, 2], X[:, 3], Y)) st.sidebar.title('Dự",
"= np.dot(Xbar.T, Y) w = np.dot(np.linalg.pinv(A), b) w_0 = w[0][0] w_1 = w[1][0]",
"450], [36, 4.5, 6, 450], [40, 9, 2, 1800], [36, 4.5, 7, 450],",
"32.65, 32.7, 33, 33, 33, 33.5, 33.5, 33.6, 34, 34, 34.3, 34.6, 35,",
"from sklearn.model_selection import train_test_split import pandas as pd import numpy as np import",
"32.37, 32.4, 32.5, 32.65, 32.7, 33, 33, 33, 33.5, 33.5, 33.6, 34, 34,",
"56.7, 60, 62.3, 62.5, 63, 63.5, 63.7, 66, 96.5, 97.3, 97.5, 98, 98.5,",
"Y): fig = plt.figure(figsize=(15, 5)) ax3 = fig.add_subplot(1, 2, 1) ax4 = fig.add_subplot(1,",
"[35, 4.5, 6, 450], [39, 9, 2, 1800], [40, 9, 1, 1800], [36,",
"4, 450], [85, 4, 4, 950], [85, 4, 5, 950], [60, 3.3, 5,",
"w_0 = w[0][0] w_1 = w[1][0] w_2 = w[2][0] w_3 = w[3][0] w_4",
"55.5, 55.54, 56, 56.7, 60, 62.3, 62.5, 63, 63.5, 63.7, 66, 96.5, 97.3,",
"[62, 6, 2, 800], [86, 3.5, 4, 900], [87, 3.5, 3, 900], [30.2,",
"450], [62, 4, 5, 550], [31.2, 4, 5, 450], [63, 5, 3, 550],",
"tích m2') ax2.plot(Y, X[:, 1]) ax2.set_title('xét số mét mặt tiền với giá tiền')",
"5.5, 7, 300], [63, 6, 4, 250], [62, 7, 4, 250], [63, 7,",
"ax1.plot(Y, X[:, 0]) ax1.set_title('xét diện tích với giá tiền') ax1.set_xlabel('giá tiền') ax1.set_ylabel('Diện tích",
"\", r2_score(y_test, np.dot(x_test, w))) vd = np.array([dt_name, cd_name, tn_name, kc_name, 1]) if st.sidebar.button('Dự",
"8, 1100], [80, 3.5, 6, 300], [80, 4.5, 5, 300], [80, 4.5, 6,",
"3, 450], [62, 4, 5, 550], [31.2, 4, 5, 450], [63, 5, 3,",
"3, 850], [88, 4.5, 4, 850], [87, 4.5, 4, 850], [88, 4.5, 5,",
"5, 500], [140, 5.5, 6, 500], [141, 5.5, 5, 500], [140, 6.5, 5,",
"def duel_plot2(X4, X5, Y): fig = plt.figure(figsize=(15, 5)) ax3 = fig.add_subplot(1, 2, 1)",
"4, 5, 950], [60, 3.3, 5, 450], [61, 6, 1, 800], [62, 5,",
"4, 450], [62, 6, 3, 800], [86, 4.5, 3, 900], [86, 6, 5,",
"3.5, 2, 900], [31.2, 3, 4, 450], [61, 3.3, 5, 450], [62, 6,",
"6, 300], [80, 4.5, 5, 300], [80, 4.5, 6, 300], [79, 4.5, 6,",
"4, 450], [31.2, 5, 4, 450], [63, 5, 5, 550], [64, 4, 5,",
"1, 800], [31.2, 4, 4, 450], [31.2, 4, 3, 450], [62, 4, 5,",
"1)) Xbar = np.concatenate((one, X), axis=1) x_train, x_test, y_train, y_test = train_test_split(Xbar, Y,",
"50, 52.3, 53, 53.38, 53.62, 54, 54.5, 55, 55.46, 55.5, 55.54, 56, 56.7,",
"[88, 4.5, 3, 850], [88, 4.5, 4, 850], [87, 4.5, 4, 850], [88,",
"3.5, 6, 300], [80, 4.5, 5, 300], [80, 4.5, 6, 300], [79, 4.5,",
"97.5, 98, 98.5, 98.7, 99.5 ]]).T def duel_plot(X1, X2, Y): fig = plt.figure(figsize=(15,",
"') cd_name = st.sidebar.text_input('Nhập chiều dài mặt tiền(m) ') tn_name = st.sidebar.text_input('Nhập số",
"5, 450], [62, 6, 1, 800], [85, 6, 5, 950], [86, 3.5, 3,",
"[63, 6, 4, 250], [62, 7, 4, 250], [63, 7, 3, 250], [63,",
"[31.2, 4, 5, 450], [63, 5, 3, 550], [63, 4, 5, 550], [32.2,",
"66, 96.5, 97.3, 97.5, 98, 98.5, 98.7, 99.5 ]]).T def duel_plot(X1, X2, Y):",
"plt.figure(figsize=(15, 5)) ax1 = fig.add_subplot(1, 2, 1) ax2 = fig.add_subplot(1, 2, 2) ax1.plot(Y,",
"5.8, 7, 1100], [80, 4.8, 8, 1100], [80, 5.8, 8, 1100], [79, 5.8,",
"nhà # x4 là khoảng cách tới hồ gươm (m) X = np.array([[40,",
"[64, 4, 5, 550], [63, 5, 6 , 550], [63, 6, 4, 550],",
"[39, 9, 2, 1800], [40, 9, 1, 1800], [36, 4.5, 5, 450], [36,",
"63.7, 66, 96.5, 97.3, 97.5, 98, 98.5, 98.7, 99.5 ]]).T def duel_plot(X1, X2,",
"5.5, 5, 500], [140, 6.5, 5, 500]]) Y = np.array([[ 19, 19.3, 19.45,",
"np import matplotlib.pyplot as plt st.title('Mô hình dự đoán giá nhà đất tại",
"99.5 ]]).T def duel_plot(X1, X2, Y): fig = plt.figure(figsize=(15, 5)) ax1 = fig.add_subplot(1,",
"6, 950], [84, 6, 5, 950], [86, 2.5, 3, 900], [60, 3.3, 6,",
"4 , 4, 450], [31.2, 5, 4, 450], [63, 5, 5, 550], [64,",
"1100], [81, 5.8, 8, 1100], [80, 6.8, 8, 1100], [80, 3.5, 6, 300],",
"21, 21, 21.3, 21.5, 21.7, 22, 22.5, 29, 30, 30.5, 30.5, 30.8, 31,",
"7, 4, 250], [63, 8, 4, 250], [140, 4.5, 5, 500], [139, 5.5,",
"5.5, 6, 450], [40, 10, 2, 1800], [45, 3, 4, 350], [45, 4,",
"[85, 4, 6, 950], [84, 6, 5, 950], [86, 2.5, 3, 900], [60,",
"[79, 4.5, 6, 300], [81, 4.5, 6, 300], [88, 3.5, 4, 850], [88,",
"350], [45, 3, 4, 350], [60, 2.3, 5, 450], [59, 3.3, 5, 450],",
"[31.2, 4, 3, 450], [62, 4, 5, 550], [31.2, 4, 5, 450], [63,",
"[88, 3.5, 4, 850], [88, 4.5, 3, 850], [88, 4.5, 4, 850], [87,",
"= train_test_split(Xbar, Y, test_size=0.2) A = np.dot(Xbar.T, Xbar) b = np.dot(Xbar.T, Y) w",
"4.5, 6, 300], [79, 4.5, 6, 300], [81, 4.5, 6, 300], [88, 3.5,",
"tích đất(m2) ') cd_name = st.sidebar.text_input('Nhập chiều dài mặt tiền(m) ') tn_name =",
"= w[0][0] w_1 = w[1][0] w_2 = w[2][0] w_3 = w[3][0] w_4 =",
"[141, 5.5, 5, 500], [140, 6.5, 5, 500]]) Y = np.array([[ 19, 19.3,",
"[31.2, 4, 4, 450], [31.2, 4, 3, 450], [62, 4, 5, 550], [31.2,",
"gươm') return fig st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(duel_plot(X[:, 0], X[:, 1], Y)) st.pyplot(duel_plot2(X[:, 2], X[:,",
"3, 4, 350], [60, 2.3, 5, 450], [59, 3.3, 5, 450], [60, 3.3,",
"32.3, 32.3, 32.37, 32.4, 32.5, 32.65, 32.7, 33, 33, 33, 33.5, 33.5, 33.6,",
"mặt tiền (m) # x3 là số tầng nhà # x4 là khoảng",
"st.sidebar.title('Dự đoán giá các mẫu nhà') dt_name = st.sidebar.text_input('Nhập diện tích đất(m2) ')",
"4, 4, 350], [45, 4, 5, 350], [45, 5, 4, 350], [45, 3,",
"là khoảng cách tới hồ gươm (m) X = np.array([[40, 8, 2, 1800],",
"1800], [36, 4.5, 5, 450], [36, 4.5, 6, 450], [40, 9, 2, 1800],",
"7, 1100], [80, 4.8, 8, 1100], [80, 5.8, 8, 1100], [79, 5.8, 8,",
"w[4][0] st.write(\"Độ chính xác (R2 square) : \", r2_score(y_test, np.dot(x_test, w))) vd =",
"đất tại hồ gươm ') # x1 là diện tích của lô đất(m2)",
"[85, 6, 5, 950], [86, 3.5, 3, 900], [62, 6, 2, 800], [86,",
"[63, 7, 4, 250], [63, 7, 5, 250], [64, 7, 4, 250], [63,",
"6, 4, 550], [80, 5.8, 7, 1100], [80, 4.8, 8, 1100], [80, 5.8,",
"w_4 = w[4][0] st.write(\"Độ chính xác (R2 square) : \", r2_score(y_test, np.dot(x_test, w)))",
"19.7, 20, 20, 20.3, 20.5, 20.5, 20.52, 20.55, 20.7, 21, 21, 21.3, 21.5,",
"[140, 4.5, 5, 500], [139, 5.5, 5, 500], [140, 5.5, 4, 500], [140,",
"6, 300], [79, 4.5, 6, 300], [81, 4.5, 6, 300], [88, 3.5, 4,",
"1100], [80, 5.8, 8, 1100], [79, 5.8, 8, 1100], [80, 5.8, 9, 1100],",
"6, 5, 950], [60, 4.3, 5, 450], [62, 7, 1, 800], [63, 6,",
"450], [35, 4.5, 6, 450], [39, 9, 2, 1800], [40, 9, 1, 1800],",
"x4 là khoảng cách tới hồ gươm (m) X = np.array([[40, 8, 2,",
"5, 4, 350], [45, 3, 4, 350], [60, 2.3, 5, 450], [59, 3.3,",
"32, 32.3, 32.3, 32.37, 32.4, 32.5, 32.65, 32.7, 33, 33, 33, 33.5, 33.5,",
"giá tiền') ax2.set_xlabel('giá tiền') ax2.set_ylabel('số mét mặt tiền') return fig def duel_plot2(X4, X5,",
"31.35, 31.5, 31.5, 31.63, 31.7, 32, 32, 32, 32, 32, 32.3, 32.3, 32.37,",
"[64, 7, 4, 250], [63, 8, 4, 250], [140, 4.5, 5, 500], [139,",
"w_2 = w[2][0] w_3 = w[3][0] w_4 = w[4][0] st.write(\"Độ chính xác (R2",
"450], [61, 6, 1, 800], [62, 5, 1, 800], [85, 4, 6, 950],",
"450], [62, 6, 3, 800], [86, 4.5, 3, 900], [86, 6, 5, 950],",
"= st.sidebar.text_input('Nhập chiều dài mặt tiền(m) ') tn_name = st.sidebar.text_input('Nhập số tầng nhà(tầng)",
"3.5, 6, 450], [35, 4.5, 6, 450], [39, 9, 2, 1800], [40, 9,",
"st.sidebar.text_input('Nhập số tầng nhà(tầng) ') kc_name = st.sidebar.text_input('Nhập khoảng cách nhà tới hồ",
"duel_plot2(X4, X5, Y): fig = plt.figure(figsize=(15, 5)) ax3 = fig.add_subplot(1, 2, 1) ax4",
"4.5, 4, 850], [87, 4.5, 4, 850], [88, 4.5, 5, 850], [89, 4.5,",
"1) ax2 = fig.add_subplot(1, 2, 2) ax1.plot(Y, X[:, 0]) ax1.set_title('xét diện tích với",
"ax3.plot(Y, X[:, 2]) ax3.set_title('xét số tầng nhà với giá tiền') ax3.set_xlabel('giá tiền') ax3.set_ylabel('số",
"') one = np.ones((X.shape[0], 1)) Xbar = np.concatenate((one, X), axis=1) x_train, x_test, y_train,",
"2.5, 3, 900], [60, 3.3, 6, 450], [85, 5, 5, 950], [85, 3.5,",
"số tầng nhà(tầng) ') kc_name = st.sidebar.text_input('Nhập khoảng cách nhà tới hồ gươm(m)",
"fig def duel_plot2(X4, X5, Y): fig = plt.figure(figsize=(15, 5)) ax3 = fig.add_subplot(1, 2,",
"ax4.set_ylabel('khoảng cách tới hồ gươm') return fig st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(duel_plot(X[:, 0], X[:, 1],",
"') tn_name = st.sidebar.text_input('Nhập số tầng nhà(tầng) ') kc_name = st.sidebar.text_input('Nhập khoảng cách",
"350], [41, 9, 2, 1800], [37, 4.5, 6, 450], [36, 5.5, 6, 450],",
"[63, 8, 4, 250], [140, 4.5, 5, 500], [139, 5.5, 5, 500], [140,",
"5.8, 9, 1100], [81, 5.8, 8, 1100], [80, 6.8, 8, 1100], [80, 3.5,",
"là số tầng nhà # x4 là khoảng cách tới hồ gươm (m)",
"[45, 3, 4, 350], [45, 4, 3, 350], [45, 4, 4, 350], [45,",
"1800], [36, 3.5, 6, 450], [35, 4.5, 6, 450], [39, 9, 2, 1800],",
"[31.2, 3, 4, 450], [61, 3.3, 5, 450], [62, 6, 1, 800], [85,",
"nhà tới hồ gươm(m) ') one = np.ones((X.shape[0], 1)) Xbar = np.concatenate((one, X),",
"900], [31.2, 3, 4, 450], [61, 3.3, 5, 450], [62, 6, 1, 800],",
"[36, 5.5, 6, 450], [40, 10, 2, 1800], [45, 3, 4, 350], [45,",
"5, 300], [80, 4.5, 6, 300], [79, 4.5, 6, 300], [81, 4.5, 6,",
"số tầng nhà với giá tiền') ax3.set_xlabel('giá tiền') ax3.set_ylabel('số tầng nhà') ax4.plot(Y, X[:,",
"4.5, 6, 300], [88, 3.5, 4, 850], [88, 4.5, 3, 850], [88, 4.5,",
"250], [63, 8, 4, 250], [140, 4.5, 5, 500], [139, 5.5, 5, 500],",
"as st from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import train_test_split import pandas",
"6, 500], [141, 5.5, 5, 500], [140, 6.5, 5, 500]]) Y = np.array([[",
"tiền') ax3.set_ylabel('số tầng nhà') ax4.plot(Y, X[:, 3]) ax4.set_title('xét khoảng cách với giá tiền')",
"950], [85, 4, 5, 950], [60, 3.3, 5, 450], [61, 6, 1, 800],",
"w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \\ float(tn_name)+w_4*float(kc_name) + w_0 st.sidebar.write('Giá của ngôi nhà là : ',",
"4, 250], [63, 7, 3, 250], [63, 7, 4, 250], [63, 7, 5,",
"6, 450], [85, 5, 5, 950], [85, 3.5, 3, 900], [86, 3.5, 2,",
"[63, 7, 5, 250], [64, 7, 4, 250], [63, 8, 4, 250], [140,",
"[45, 4, 5, 350], [45, 5, 4, 350], [45, 3, 4, 350], [60,",
"20, 20, 20.3, 20.5, 20.5, 20.52, 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7,",
"mặt tiền') return fig def duel_plot2(X4, X5, Y): fig = plt.figure(figsize=(15, 5)) ax3",
"29, 30, 30.5, 30.5, 30.8, 31, 31, 31, 31, 31.3, 31.35, 31.5, 31.5,",
"[63, 6, 4, 550], [80, 5.8, 7, 1100], [80, 4.8, 8, 1100], [80,",
"[86, 2.5, 3, 900], [60, 3.3, 6, 450], [85, 5, 5, 950], [85,",
"[60, 3.3, 6, 450], [85, 5, 5, 950], [85, 3.5, 3, 900], [86,",
"3, 900], [30.2, 4, 4, 450], [62, 6, 3, 800], [86, 4.5, 3,",
"20.3, 20.5, 20.5, 20.52, 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22, 22.5,",
"ax4.set_xlabel('giá tiền') ax4.set_ylabel('khoảng cách tới hồ gươm') return fig st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(duel_plot(X[:, 0],",
"450], [85, 4, 4, 950], [85, 4, 5, 950], [60, 3.3, 5, 450],",
"350], [45, 4, 5, 350], [45, 5, 4, 350], [45, 3, 4, 350],",
"[30.2, 4, 4, 450], [62, 6, 3, 800], [86, 4.5, 3, 900], [86,",
"giá tiền') ax1.set_xlabel('giá tiền') ax1.set_ylabel('Diện tích m2') ax2.plot(Y, X[:, 1]) ax2.set_title('xét số mét",
"[36, 4.5, 5, 450], [36, 4.5, 6, 450], [40, 9, 2, 1800], [36,",
"mét mặt tiền') return fig def duel_plot2(X4, X5, Y): fig = plt.figure(figsize=(15, 5))",
"32.4, 32.5, 32.65, 32.7, 33, 33, 33, 33.5, 33.5, 33.6, 34, 34, 34.3,",
"2, 1) ax4 = fig.add_subplot(1, 2, 2) ax3.plot(Y, X[:, 2]) ax3.set_title('xét số tầng",
"[86, 6, 5, 950], [60, 4.3, 5, 450], [62, 7, 1, 800], [63,",
"6, 5, 950], [86, 3.5, 3, 900], [62, 6, 2, 800], [86, 3.5,",
"ax2.set_xlabel('giá tiền') ax2.set_ylabel('số mét mặt tiền') return fig def duel_plot2(X4, X5, Y): fig",
"tiền') return fig def duel_plot2(X4, X5, Y): fig = plt.figure(figsize=(15, 5)) ax3 =",
"tn_name, kc_name, 1]) if st.sidebar.button('Dự đoán'): y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \\ float(tn_name)+w_4*float(kc_name) +",
"(m) # x3 là số tầng nhà # x4 là khoảng cách tới",
"= np.array([[ 19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5, 20.5,",
"2, 1800], [37, 4.5, 6, 450], [36, 5.5, 6, 450], [40, 10, 2,",
"950], [85, 3.5, 3, 900], [86, 3.5, 2, 900], [31.2, 3, 4, 450],",
"3, 800], [86, 4.5, 3, 900], [86, 6, 5, 950], [60, 4.3, 5,",
"= np.array([dt_name, cd_name, tn_name, kc_name, 1]) if st.sidebar.button('Dự đoán'): y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 *",
"4.5, 6, 300], [81, 4.5, 6, 300], [88, 3.5, 4, 850], [88, 4.5,",
"fig.add_subplot(1, 2, 1) ax2 = fig.add_subplot(1, 2, 2) ax1.plot(Y, X[:, 0]) ax1.set_title('xét diện",
"10, 2, 1800], [45, 3, 4, 350], [45, 4, 3, 350], [45, 4,",
"giá nhà đất tại hồ gươm ') # x1 là diện tích của",
"1) ax4 = fig.add_subplot(1, 2, 2) ax3.plot(Y, X[:, 2]) ax3.set_title('xét số tầng nhà",
"350], [45, 4, 3, 350], [45, 4, 4, 350], [45, 4, 5, 350],",
"5, 450], [63, 5, 3, 550], [63, 4, 5, 550], [32.2, 4 ,",
"đất(m2) # x2 là chiều dài mặt tiền (m) # x3 là số",
"6, 450], [35, 4.5, 6, 450], [39, 9, 2, 1800], [40, 9, 1,",
"34.6, 35, 35, 35, 35.5, 35.7, 42.5, 42.9, 43, 43.463, 43.5, 43.537, 44.1,",
"[45, 5, 4, 350], [45, 3, 4, 350], [60, 2.3, 5, 450], [59,",
"55.46, 55.5, 55.54, 56, 56.7, 60, 62.3, 62.5, 63, 63.5, 63.7, 66, 96.5,",
"3, 900], [86, 3.5, 2, 900], [31.2, 3, 4, 450], [61, 3.3, 5,",
"350], [45, 4, 4, 350], [45, 4, 5, 350], [45, 5, 4, 350],",
"6, 1, 800], [62, 5, 1, 800], [85, 4, 6, 950], [84, 6,",
"9, 1, 1800], [36, 4.5, 5, 450], [36, 4.5, 6, 450], [40, 9,",
"[80, 5.5, 7, 300], [63, 6, 4, 250], [62, 7, 4, 250], [63,",
"5, 950], [85, 3.5, 3, 900], [86, 3.5, 2, 900], [31.2, 3, 4,",
"800], [62, 5, 1, 800], [85, 4, 6, 950], [84, 6, 5, 950],",
"các mẫu nhà') dt_name = st.sidebar.text_input('Nhập diện tích đất(m2) ') cd_name = st.sidebar.text_input('Nhập",
"[80, 6.8, 8, 1100], [80, 3.5, 6, 300], [80, 4.5, 5, 300], [80,",
"4, 850], [88, 5.5, 4, 850], [80, 5.5, 7, 300], [63, 6, 4,",
"sklearn.model_selection import train_test_split import pandas as pd import numpy as np import matplotlib.pyplot",
"6 , 550], [63, 6, 4, 550], [80, 5.8, 7, 1100], [80, 4.8,",
"của lô đất(m2) # x2 là chiều dài mặt tiền (m) # x3",
"450], [59, 3.3, 5, 450], [60, 3.3, 4, 450], [85, 4, 4, 950],",
"4, 950], [85, 4, 5, 950], [60, 3.3, 5, 450], [61, 6, 1,",
"4, 850], [88, 4.5, 3, 850], [88, 4.5, 4, 850], [87, 4.5, 4,",
"hồ gươm') return fig st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(duel_plot(X[:, 0], X[:, 1], Y)) st.pyplot(duel_plot2(X[:, 2],",
"[40, 9, 2, 1800], [36, 4.5, 7, 450], [40, 9, 3, 1800], [44,",
"450], [62, 7, 1, 800], [63, 6, 1, 800], [31.2, 4, 4, 450],",
"2, 1800], [45, 3, 4, 350], [45, 4, 3, 350], [45, 4, 4,",
"4, 850], [80, 5.5, 7, 300], [63, 6, 4, 250], [62, 7, 4,",
"62.3, 62.5, 63, 63.5, 63.7, 66, 96.5, 97.3, 97.5, 98, 98.5, 98.7, 99.5",
"X[:, 2]) ax3.set_title('xét số tầng nhà với giá tiền') ax3.set_xlabel('giá tiền') ax3.set_ylabel('số tầng",
"20, 20.3, 20.5, 20.5, 20.52, 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22,",
"dự đoán giá nhà đất tại hồ gươm ') # x1 là diện",
"550], [63, 4, 5, 550], [32.2, 4 , 4, 450], [31.2, 5, 4,",
"500], [140, 5.5, 6, 500], [141, 5.5, 5, 500], [140, 6.5, 5, 500]])",
"43, 43.463, 43.5, 43.537, 44.1, 50, 52.3, 53, 53.38, 53.62, 54, 54.5, 55,",
"5, 500], [140, 6.5, 5, 500]]) Y = np.array([[ 19, 19.3, 19.45, 19.48,",
"33, 33, 33, 33.5, 33.5, 33.6, 34, 34, 34.3, 34.6, 35, 35, 35,",
"30.8, 31, 31, 31, 31, 31.3, 31.35, 31.5, 31.5, 31.63, 31.7, 32, 32,",
"5)) ax1 = fig.add_subplot(1, 2, 1) ax2 = fig.add_subplot(1, 2, 2) ax1.plot(Y, X[:,",
"diện tích với giá tiền') ax1.set_xlabel('giá tiền') ax1.set_ylabel('Diện tích m2') ax2.plot(Y, X[:, 1])",
"4.5, 4, 850], [88, 4.5, 5, 850], [89, 4.5, 4, 850], [88, 5.5,",
"với giá tiền') ax2.set_xlabel('giá tiền') ax2.set_ylabel('số mét mặt tiền') return fig def duel_plot2(X4,",
"950], [86, 3.5, 3, 900], [62, 6, 2, 800], [86, 3.5, 4, 900],",
"30, 30.5, 30.5, 30.8, 31, 31, 31, 31, 31.3, 31.35, 31.5, 31.5, 31.63,",
"450], [40, 10, 2, 1800], [45, 3, 4, 350], [45, 4, 3, 350],",
"ax4.set_title('xét khoảng cách với giá tiền') ax4.set_xlabel('giá tiền') ax4.set_ylabel('khoảng cách tới hồ gươm')",
"31, 31, 31, 31, 31.3, 31.35, 31.5, 31.5, 31.63, 31.7, 32, 32, 32,",
"6, 450], [36, 5.5, 6, 450], [40, 10, 2, 1800], [45, 3, 4,",
"False) st.pyplot(duel_plot(X[:, 0], X[:, 1], Y)) st.pyplot(duel_plot2(X[:, 2], X[:, 3], Y)) st.sidebar.title('Dự đoán",
"với giá tiền') ax1.set_xlabel('giá tiền') ax1.set_ylabel('Diện tích m2') ax2.plot(Y, X[:, 1]) ax2.set_title('xét số",
"đoán giá nhà đất tại hồ gươm ') # x1 là diện tích",
"return fig def duel_plot2(X4, X5, Y): fig = plt.figure(figsize=(15, 5)) ax3 = fig.add_subplot(1,",
"4, 450], [63, 5, 5, 550], [64, 4, 5, 550], [63, 5, 6",
"250], [63, 7, 3, 250], [63, 7, 4, 250], [63, 7, 5, 250],",
"53.62, 54, 54.5, 55, 55.46, 55.5, 55.54, 56, 56.7, 60, 62.3, 62.5, 63,",
"tiền(m) ') tn_name = st.sidebar.text_input('Nhập số tầng nhà(tầng) ') kc_name = st.sidebar.text_input('Nhập khoảng",
"2]) ax3.set_title('xét số tầng nhà với giá tiền') ax3.set_xlabel('giá tiền') ax3.set_ylabel('số tầng nhà')",
"1, 1800], [36, 4.5, 5, 450], [36, 4.5, 6, 450], [40, 9, 2,",
"[36, 4.5, 6, 450], [40, 9, 2, 1800], [36, 4.5, 7, 450], [40,",
"[40, 9, 3, 1800], [44, 4, 5, 350], [41, 9, 2, 1800], [37,",
"4, 850], [88, 4.5, 5, 850], [89, 4.5, 4, 850], [88, 5.5, 4,",
"[85, 4, 4, 950], [85, 4, 5, 950], [60, 3.3, 5, 450], [61,",
"tích với giá tiền') ax1.set_xlabel('giá tiền') ax1.set_ylabel('Diện tích m2') ax2.plot(Y, X[:, 1]) ax2.set_title('xét",
"vd = np.array([dt_name, cd_name, tn_name, kc_name, 1]) if st.sidebar.button('Dự đoán'): y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3",
"fig = plt.figure(figsize=(15, 5)) ax3 = fig.add_subplot(1, 2, 1) ax4 = fig.add_subplot(1, 2,",
"450], [31.2, 5, 4, 450], [63, 5, 5, 550], [64, 4, 5, 550],",
"xác (R2 square) : \", r2_score(y_test, np.dot(x_test, w))) vd = np.array([dt_name, cd_name, tn_name,",
"[80, 5.8, 7, 1100], [80, 4.8, 8, 1100], [80, 5.8, 8, 1100], [79,",
"[45, 4, 4, 350], [45, 4, 5, 350], [45, 5, 4, 350], [45,",
"st.sidebar.text_input('Nhập khoảng cách nhà tới hồ gươm(m) ') one = np.ones((X.shape[0], 1)) Xbar",
"[86, 4.5, 3, 900], [86, 6, 5, 950], [60, 4.3, 5, 450], [62,",
"st.sidebar.text_input('Nhập chiều dài mặt tiền(m) ') tn_name = st.sidebar.text_input('Nhập số tầng nhà(tầng) ')",
"31, 31, 31, 31.3, 31.35, 31.5, 31.5, 31.63, 31.7, 32, 32, 32, 32,",
"550], [63, 5, 6 , 550], [63, 6, 4, 550], [80, 5.8, 7,",
"y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \\ float(tn_name)+w_4*float(kc_name) + w_0 st.sidebar.write('Giá của ngôi nhà là",
"[60, 3.3, 5, 450], [61, 6, 1, 800], [62, 5, 1, 800], [85,",
"là chiều dài mặt tiền (m) # x3 là số tầng nhà #",
"np.dot(np.linalg.pinv(A), b) w_0 = w[0][0] w_1 = w[1][0] w_2 = w[2][0] w_3 =",
"97.3, 97.5, 98, 98.5, 98.7, 99.5 ]]).T def duel_plot(X1, X2, Y): fig =",
"4, 850], [87, 4.5, 4, 850], [88, 4.5, 5, 850], [89, 4.5, 4,",
"1]) if st.sidebar.button('Dự đoán'): y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \\ float(tn_name)+w_4*float(kc_name) + w_0 st.sidebar.write('Giá",
"3, 900], [86, 6, 5, 950], [60, 4.3, 5, 450], [62, 7, 1,",
"4, 500], [140, 5.5, 5, 500], [140, 5.5, 6, 500], [141, 5.5, 5,",
"300], [88, 3.5, 4, 850], [88, 4.5, 3, 850], [88, 4.5, 4, 850],",
"1800], [45, 3, 4, 350], [45, 4, 3, 350], [45, 4, 4, 350],",
"import division, print_function, unicode_literals import streamlit as st from sklearn.metrics import mean_squared_error, r2_score",
"[81, 4.5, 6, 300], [88, 3.5, 4, 850], [88, 4.5, 3, 850], [88,",
"tới hồ gươm') return fig st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(duel_plot(X[:, 0], X[:, 1], Y)) st.pyplot(duel_plot2(X[:,",
"ax2 = fig.add_subplot(1, 2, 2) ax1.plot(Y, X[:, 0]) ax1.set_title('xét diện tích với giá",
"# x1 là diện tích của lô đất(m2) # x2 là chiều dài",
"3.3, 5, 450], [62, 6, 1, 800], [85, 6, 5, 950], [86, 3.5,",
"[32.2, 4 , 4, 450], [31.2, 5, 4, 450], [63, 5, 5, 550],",
"[62, 6, 1, 800], [85, 6, 5, 950], [86, 3.5, 3, 900], [62,",
"2, 1) ax2 = fig.add_subplot(1, 2, 2) ax1.plot(Y, X[:, 0]) ax1.set_title('xét diện tích",
"32.7, 33, 33, 33, 33.5, 33.5, 33.6, 34, 34, 34.3, 34.6, 35, 35,",
"34.3, 34.6, 35, 35, 35, 35.5, 35.7, 42.5, 42.9, 43, 43.463, 43.5, 43.537,",
"[140, 5.5, 5, 500], [140, 5.5, 6, 500], [141, 5.5, 5, 500], [140,",
"st.sidebar.text_input('Nhập diện tích đất(m2) ') cd_name = st.sidebar.text_input('Nhập chiều dài mặt tiền(m) ')",
"np.dot(x_test, w))) vd = np.array([dt_name, cd_name, tn_name, kc_name, 1]) if st.sidebar.button('Dự đoán'): y1",
"tới hồ gươm(m) ') one = np.ones((X.shape[0], 1)) Xbar = np.concatenate((one, X), axis=1)",
"[80, 4.5, 5, 300], [80, 4.5, 6, 300], [79, 4.5, 6, 300], [81,",
"ax2.set_title('xét số mét mặt tiền với giá tiền') ax2.set_xlabel('giá tiền') ax2.set_ylabel('số mét mặt",
"= fig.add_subplot(1, 2, 1) ax4 = fig.add_subplot(1, 2, 2) ax3.plot(Y, X[:, 2]) ax3.set_title('xét",
", 550], [63, 6, 4, 550], [80, 5.8, 7, 1100], [80, 4.8, 8,",
"5, 550], [63, 5, 6 , 550], [63, 6, 4, 550], [80, 5.8,",
"b = np.dot(Xbar.T, Y) w = np.dot(np.linalg.pinv(A), b) w_0 = w[0][0] w_1 =",
"5.5, 5, 500], [140, 5.5, 6, 500], [141, 5.5, 5, 500], [140, 6.5,",
"nhà đất tại hồ gươm ') # x1 là diện tích của lô",
"300], [80, 4.5, 5, 300], [80, 4.5, 6, 300], [79, 4.5, 6, 300],",
"5.5, 5, 500], [140, 5.5, 4, 500], [140, 5.5, 5, 500], [140, 5.5,",
"r2_score from sklearn.model_selection import train_test_split import pandas as pd import numpy as np",
"4, 350], [45, 4, 5, 350], [45, 5, 4, 350], [45, 3, 4,",
"Y = np.array([[ 19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5,",
"train_test_split(Xbar, Y, test_size=0.2) A = np.dot(Xbar.T, Xbar) b = np.dot(Xbar.T, Y) w =",
"if st.sidebar.button('Dự đoán'): y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \\ float(tn_name)+w_4*float(kc_name) + w_0 st.sidebar.write('Giá của",
"350], [60, 2.3, 5, 450], [59, 3.3, 5, 450], [60, 3.3, 4, 450],",
"kc_name, 1]) if st.sidebar.button('Dự đoán'): y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \\ float(tn_name)+w_4*float(kc_name) + w_0",
"900], [62, 6, 2, 800], [86, 3.5, 4, 900], [87, 3.5, 3, 900],",
"[63, 5, 5, 550], [64, 4, 5, 550], [63, 5, 6 , 550],",
"3.5, 3, 900], [86, 3.5, 2, 900], [31.2, 3, 4, 450], [61, 3.3,",
"500], [140, 6.5, 5, 500]]) Y = np.array([[ 19, 19.3, 19.45, 19.48, 19.5,",
"(m) X = np.array([[40, 8, 2, 1800], [36, 3.5, 6, 450], [35, 4.5,",
"y_train, y_test = train_test_split(Xbar, Y, test_size=0.2) A = np.dot(Xbar.T, Xbar) b = np.dot(Xbar.T,",
"[85, 3.5, 3, 900], [86, 3.5, 2, 900], [31.2, 3, 4, 450], [61,",
"X2, Y): fig = plt.figure(figsize=(15, 5)) ax1 = fig.add_subplot(1, 2, 1) ax2 =",
"from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import train_test_split import pandas as pd",
"2, 1800], [36, 4.5, 7, 450], [40, 9, 3, 1800], [44, 4, 5,",
"5.8, 8, 1100], [80, 5.8, 9, 1100], [81, 5.8, 8, 1100], [80, 6.8,",
"35, 35.5, 35.7, 42.5, 42.9, 43, 43.463, 43.5, 43.537, 44.1, 50, 52.3, 53,",
"62.5, 63, 63.5, 63.7, 66, 96.5, 97.3, 97.5, 98, 98.5, 98.7, 99.5 ]]).T",
"np.ones((X.shape[0], 1)) Xbar = np.concatenate((one, X), axis=1) x_train, x_test, y_train, y_test = train_test_split(Xbar,",
"X), axis=1) x_train, x_test, y_train, y_test = train_test_split(Xbar, Y, test_size=0.2) A = np.dot(Xbar.T,",
"x2 là chiều dài mặt tiền (m) # x3 là số tầng nhà",
"4.5, 6, 450], [39, 9, 2, 1800], [40, 9, 1, 1800], [36, 4.5,",
"3, 4, 350], [45, 4, 3, 350], [45, 4, 4, 350], [45, 4,",
"4.5, 7, 450], [40, 9, 3, 1800], [44, 4, 5, 350], [41, 9,",
"4.5, 5, 500], [139, 5.5, 5, 500], [140, 5.5, 4, 500], [140, 5.5,",
"train_test_split import pandas as pd import numpy as np import matplotlib.pyplot as plt",
"giá các mẫu nhà') dt_name = st.sidebar.text_input('Nhập diện tích đất(m2) ') cd_name =",
"3, 900], [62, 6, 2, 800], [86, 3.5, 4, 900], [87, 3.5, 3,",
"= plt.figure(figsize=(15, 5)) ax3 = fig.add_subplot(1, 2, 1) ax4 = fig.add_subplot(1, 2, 2)",
"hình dự đoán giá nhà đất tại hồ gươm ') # x1 là",
"900], [86, 3.5, 2, 900], [31.2, 3, 4, 450], [61, 3.3, 5, 450],",
"800], [86, 4.5, 3, 900], [86, 6, 5, 950], [60, 4.3, 5, 450],",
"7, 300], [63, 6, 4, 250], [62, 7, 4, 250], [63, 7, 3,",
"st.write(\"Độ chính xác (R2 square) : \", r2_score(y_test, np.dot(x_test, w))) vd = np.array([dt_name,",
"\\ float(tn_name)+w_4*float(kc_name) + w_0 st.sidebar.write('Giá của ngôi nhà là : ', y1, 'tỷ",
"[86, 3.5, 3, 900], [62, 6, 2, 800], [86, 3.5, 4, 900], [87,",
"pandas as pd import numpy as np import matplotlib.pyplot as plt st.title('Mô hình",
"1800], [40, 9, 1, 1800], [36, 4.5, 5, 450], [36, 4.5, 6, 450],",
"32.3, 32.37, 32.4, 32.5, 32.65, 32.7, 33, 33, 33, 33.5, 33.5, 33.6, 34,",
"ax1.set_title('xét diện tích với giá tiền') ax1.set_xlabel('giá tiền') ax1.set_ylabel('Diện tích m2') ax2.plot(Y, X[:,",
"31, 31.3, 31.35, 31.5, 31.5, 31.63, 31.7, 32, 32, 32, 32, 32, 32.3,",
"8, 1100], [80, 5.8, 8, 1100], [79, 5.8, 8, 1100], [80, 5.8, 9,",
"5.5, 4, 850], [80, 5.5, 7, 300], [63, 6, 4, 250], [62, 7,",
"850], [88, 5.5, 4, 850], [80, 5.5, 7, 300], [63, 6, 4, 250],",
"def duel_plot(X1, X2, Y): fig = plt.figure(figsize=(15, 5)) ax1 = fig.add_subplot(1, 2, 1)",
"Y): fig = plt.figure(figsize=(15, 5)) ax1 = fig.add_subplot(1, 2, 1) ax2 = fig.add_subplot(1,",
"diện tích của lô đất(m2) # x2 là chiều dài mặt tiền (m)",
"plt.figure(figsize=(15, 5)) ax3 = fig.add_subplot(1, 2, 1) ax4 = fig.add_subplot(1, 2, 2) ax3.plot(Y,",
"450], [31.2, 4, 3, 450], [62, 4, 5, 550], [31.2, 4, 5, 450],",
"[80, 5.8, 9, 1100], [81, 5.8, 8, 1100], [80, 6.8, 8, 1100], [80,",
"450], [61, 3.3, 5, 450], [62, 6, 1, 800], [85, 6, 5, 950],",
"tới hồ gươm (m) X = np.array([[40, 8, 2, 1800], [36, 3.5, 6,",
"[60, 3.3, 4, 450], [85, 4, 4, 950], [85, 4, 5, 950], [60,",
"3, 250], [63, 7, 4, 250], [63, 7, 5, 250], [64, 7, 4,",
"5, 500]]) Y = np.array([[ 19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20,",
"[87, 4.5, 4, 850], [88, 4.5, 5, 850], [89, 4.5, 4, 850], [88,",
"2) ax1.plot(Y, X[:, 0]) ax1.set_title('xét diện tích với giá tiền') ax1.set_xlabel('giá tiền') ax1.set_ylabel('Diện",
"5.8, 8, 1100], [80, 6.8, 8, 1100], [80, 3.5, 6, 300], [80, 4.5,",
"[41, 9, 2, 1800], [37, 4.5, 6, 450], [36, 5.5, 6, 450], [40,",
"tầng nhà với giá tiền') ax3.set_xlabel('giá tiền') ax3.set_ylabel('số tầng nhà') ax4.plot(Y, X[:, 3])",
"[63, 5, 3, 550], [63, 4, 5, 550], [32.2, 4 , 4, 450],",
"3, 350], [45, 4, 4, 350], [45, 4, 5, 350], [45, 5, 4,",
"33, 33.5, 33.5, 33.6, 34, 34, 34.3, 34.6, 35, 35, 35, 35.5, 35.7,",
"250], [140, 4.5, 5, 500], [139, 5.5, 5, 500], [140, 5.5, 4, 500],",
"60, 62.3, 62.5, 63, 63.5, 63.7, 66, 96.5, 97.3, 97.5, 98, 98.5, 98.7,",
"ax4 = fig.add_subplot(1, 2, 2) ax3.plot(Y, X[:, 2]) ax3.set_title('xét số tầng nhà với",
"[61, 3.3, 5, 450], [62, 6, 1, 800], [85, 6, 5, 950], [86,",
"tiền') ax3.set_xlabel('giá tiền') ax3.set_ylabel('số tầng nhà') ax4.plot(Y, X[:, 3]) ax4.set_title('xét khoảng cách với",
"fig st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(duel_plot(X[:, 0], X[:, 1], Y)) st.pyplot(duel_plot2(X[:, 2], X[:, 3], Y))",
"[36, 3.5, 6, 450], [35, 4.5, 6, 450], [39, 9, 2, 1800], [40,",
"# x2 là chiều dài mặt tiền (m) # x3 là số tầng",
"diện tích đất(m2) ') cd_name = st.sidebar.text_input('Nhập chiều dài mặt tiền(m) ') tn_name",
"[84, 6, 5, 950], [86, 2.5, 3, 900], [60, 3.3, 6, 450], [85,",
"r2_score(y_test, np.dot(x_test, w))) vd = np.array([dt_name, cd_name, tn_name, kc_name, 1]) if st.sidebar.button('Dự đoán'):",
"6, 450], [39, 9, 2, 1800], [40, 9, 1, 1800], [36, 4.5, 5,",
"số tầng nhà # x4 là khoảng cách tới hồ gươm (m) X",
"5, 450], [36, 4.5, 6, 450], [40, 9, 2, 1800], [36, 4.5, 7,",
"2.3, 5, 450], [59, 3.3, 5, 450], [60, 3.3, 4, 450], [85, 4,",
"35.7, 42.5, 42.9, 43, 43.463, 43.5, 43.537, 44.1, 50, 52.3, 53, 53.38, 53.62,",
"tiền') ax1.set_ylabel('Diện tích m2') ax2.plot(Y, X[:, 1]) ax2.set_title('xét số mét mặt tiền với",
"[63, 4, 5, 550], [32.2, 4 , 4, 450], [31.2, 5, 4, 450],",
"w))) vd = np.array([dt_name, cd_name, tn_name, kc_name, 1]) if st.sidebar.button('Dự đoán'): y1 =",
"np.array([[40, 8, 2, 1800], [36, 3.5, 6, 450], [35, 4.5, 6, 450], [39,",
"450], [62, 6, 1, 800], [85, 6, 5, 950], [86, 3.5, 3, 900],",
"[86, 3.5, 4, 900], [87, 3.5, 3, 900], [30.2, 4, 4, 450], [62,",
"52.3, 53, 53.38, 53.62, 54, 54.5, 55, 55.46, 55.5, 55.54, 56, 56.7, 60,",
"[40, 9, 1, 1800], [36, 4.5, 5, 450], [36, 4.5, 6, 450], [40,",
"1800], [37, 4.5, 6, 450], [36, 5.5, 6, 450], [40, 10, 2, 1800],",
"850], [88, 4.5, 3, 850], [88, 4.5, 4, 850], [87, 4.5, 4, 850],",
"dài mặt tiền (m) # x3 là số tầng nhà # x4 là",
"[139, 5.5, 5, 500], [140, 5.5, 4, 500], [140, 5.5, 5, 500], [140,",
"') # x1 là diện tích của lô đất(m2) # x2 là chiều",
"= np.dot(np.linalg.pinv(A), b) w_0 = w[0][0] w_1 = w[1][0] w_2 = w[2][0] w_3",
"850], [87, 4.5, 4, 850], [88, 4.5, 5, 850], [89, 4.5, 4, 850],",
"450], [60, 3.3, 4, 450], [85, 4, 4, 950], [85, 4, 5, 950],",
"tiền (m) # x3 là số tầng nhà # x4 là khoảng cách",
"np.concatenate((one, X), axis=1) x_train, x_test, y_train, y_test = train_test_split(Xbar, Y, test_size=0.2) A =",
"Y)) st.pyplot(duel_plot2(X[:, 2], X[:, 3], Y)) st.sidebar.title('Dự đoán giá các mẫu nhà') dt_name",
"__future__ import division, print_function, unicode_literals import streamlit as st from sklearn.metrics import mean_squared_error,",
"[88, 4.5, 4, 850], [87, 4.5, 4, 850], [88, 4.5, 5, 850], [89,",
"5, 6 , 550], [63, 6, 4, 550], [80, 5.8, 7, 1100], [80,",
"st.pyplot(duel_plot(X[:, 0], X[:, 1], Y)) st.pyplot(duel_plot2(X[:, 2], X[:, 3], Y)) st.sidebar.title('Dự đoán giá",
"31.5, 31.5, 31.63, 31.7, 32, 32, 32, 32, 32, 32.3, 32.3, 32.37, 32.4,",
"4.5, 6, 450], [36, 5.5, 6, 450], [40, 10, 2, 1800], [45, 3,",
"5, 550], [64, 4, 5, 550], [63, 5, 6 , 550], [63, 6,",
"x_test, y_train, y_test = train_test_split(Xbar, Y, test_size=0.2) A = np.dot(Xbar.T, Xbar) b =",
"6, 4, 250], [62, 7, 4, 250], [63, 7, 3, 250], [63, 7,",
"X[:, 3]) ax4.set_title('xét khoảng cách với giá tiền') ax4.set_xlabel('giá tiền') ax4.set_ylabel('khoảng cách tới",
"import pandas as pd import numpy as np import matplotlib.pyplot as plt st.title('Mô",
"[36, 4.5, 7, 450], [40, 9, 3, 1800], [44, 4, 5, 350], [41,",
"500], [140, 5.5, 4, 500], [140, 5.5, 5, 500], [140, 5.5, 6, 500],",
"4, 6, 950], [84, 6, 5, 950], [86, 2.5, 3, 900], [60, 3.3,",
"[140, 5.5, 6, 500], [141, 5.5, 5, 500], [140, 6.5, 5, 500]]) Y",
"nhà(tầng) ') kc_name = st.sidebar.text_input('Nhập khoảng cách nhà tới hồ gươm(m) ') one",
"450], [63, 5, 3, 550], [63, 4, 5, 550], [32.2, 4 , 4,",
"np.dot(Xbar.T, Xbar) b = np.dot(Xbar.T, Y) w = np.dot(np.linalg.pinv(A), b) w_0 = w[0][0]",
"[62, 7, 1, 800], [63, 6, 1, 800], [31.2, 4, 4, 450], [31.2,",
"250], [62, 7, 4, 250], [63, 7, 3, 250], [63, 7, 4, 250],",
"19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5, 20.5, 20.52, 20.55,",
"[89, 4.5, 4, 850], [88, 5.5, 4, 850], [80, 5.5, 7, 300], [63,",
"0]) ax1.set_title('xét diện tích với giá tiền') ax1.set_xlabel('giá tiền') ax1.set_ylabel('Diện tích m2') ax2.plot(Y,",
"7, 5, 250], [64, 7, 4, 250], [63, 8, 4, 250], [140, 4.5,",
"[62, 7, 4, 250], [63, 7, 3, 250], [63, 7, 4, 250], [63,",
"5.8, 8, 1100], [79, 5.8, 8, 1100], [80, 5.8, 9, 1100], [81, 5.8,",
"54.5, 55, 55.46, 55.5, 55.54, 56, 56.7, 60, 62.3, 62.5, 63, 63.5, 63.7,",
"chiều dài mặt tiền (m) # x3 là số tầng nhà # x4",
"X[:, 0]) ax1.set_title('xét diện tích với giá tiền') ax1.set_xlabel('giá tiền') ax1.set_ylabel('Diện tích m2')",
"') kc_name = st.sidebar.text_input('Nhập khoảng cách nhà tới hồ gươm(m) ') one =",
"tầng nhà # x4 là khoảng cách tới hồ gươm (m) X =",
"tiền') ax2.set_xlabel('giá tiền') ax2.set_ylabel('số mét mặt tiền') return fig def duel_plot2(X4, X5, Y):",
"900], [30.2, 4, 4, 450], [62, 6, 3, 800], [86, 4.5, 3, 900],",
"3.3, 4, 450], [85, 4, 4, 950], [85, 4, 5, 950], [60, 3.3,",
"tiền') ax4.set_ylabel('khoảng cách tới hồ gươm') return fig st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(duel_plot(X[:, 0], X[:,",
"300], [81, 4.5, 6, 300], [88, 3.5, 4, 850], [88, 4.5, 3, 850],",
"mặt tiền(m) ') tn_name = st.sidebar.text_input('Nhập số tầng nhà(tầng) ') kc_name = st.sidebar.text_input('Nhập",
"1, 800], [85, 4, 6, 950], [84, 6, 5, 950], [86, 2.5, 3,",
"7, 1, 800], [63, 6, 1, 800], [31.2, 4, 4, 450], [31.2, 4,",
"4, 550], [80, 5.8, 7, 1100], [80, 4.8, 8, 1100], [80, 5.8, 8,",
"900], [60, 3.3, 6, 450], [85, 5, 5, 950], [85, 3.5, 3, 900],",
"kc_name = st.sidebar.text_input('Nhập khoảng cách nhà tới hồ gươm(m) ') one = np.ones((X.shape[0],",
"= w[4][0] st.write(\"Độ chính xác (R2 square) : \", r2_score(y_test, np.dot(x_test, w))) vd",
"250], [63, 7, 4, 250], [63, 7, 5, 250], [64, 7, 4, 250],",
"9, 2, 1800], [36, 4.5, 7, 450], [40, 9, 3, 1800], [44, 4,",
"34, 34, 34.3, 34.6, 35, 35, 35, 35.5, 35.7, 42.5, 42.9, 43, 43.463,",
"2, 2) ax1.plot(Y, X[:, 0]) ax1.set_title('xét diện tích với giá tiền') ax1.set_xlabel('giá tiền')",
"21.5, 21.7, 22, 22.5, 29, 30, 30.5, 30.5, 30.8, 31, 31, 31, 31,",
"tầng nhà') ax4.plot(Y, X[:, 3]) ax4.set_title('xét khoảng cách với giá tiền') ax4.set_xlabel('giá tiền')",
"450], [85, 5, 5, 950], [85, 3.5, 3, 900], [86, 3.5, 2, 900],",
"tiền với giá tiền') ax2.set_xlabel('giá tiền') ax2.set_ylabel('số mét mặt tiền') return fig def",
"w_1 = w[1][0] w_2 = w[2][0] w_3 = w[3][0] w_4 = w[4][0] st.write(\"Độ",
"5, 3, 550], [63, 4, 5, 550], [32.2, 4 , 4, 450], [31.2,",
"4, 350], [45, 3, 4, 350], [60, 2.3, 5, 450], [59, 3.3, 5,",
"= np.array([[40, 8, 2, 1800], [36, 3.5, 6, 450], [35, 4.5, 6, 450],",
"4, 450], [31.2, 4, 3, 450], [62, 4, 5, 550], [31.2, 4, 5,",
"5, 500], [140, 5.5, 4, 500], [140, 5.5, 5, 500], [140, 5.5, 6,",
"tiền') ax2.set_ylabel('số mét mặt tiền') return fig def duel_plot2(X4, X5, Y): fig =",
"32, 32, 32, 32.3, 32.3, 32.37, 32.4, 32.5, 32.65, 32.7, 33, 33, 33,",
"4, 3, 450], [62, 4, 5, 550], [31.2, 4, 5, 450], [63, 5,",
"w_3 = w[3][0] w_4 = w[4][0] st.write(\"Độ chính xác (R2 square) : \",",
"numpy as np import matplotlib.pyplot as plt st.title('Mô hình dự đoán giá nhà",
"3.5, 3, 900], [62, 6, 2, 800], [86, 3.5, 4, 900], [87, 3.5,",
"2, 900], [31.2, 3, 4, 450], [61, 3.3, 5, 450], [62, 6, 1,",
"3.3, 5, 450], [60, 3.3, 4, 450], [85, 4, 4, 950], [85, 4,",
"4.5, 5, 850], [89, 4.5, 4, 850], [88, 5.5, 4, 850], [80, 5.5,",
"= plt.figure(figsize=(15, 5)) ax1 = fig.add_subplot(1, 2, 1) ax2 = fig.add_subplot(1, 2, 2)",
"350], [45, 5, 4, 350], [45, 3, 4, 350], [60, 2.3, 5, 450],",
"55, 55.46, 55.5, 55.54, 56, 56.7, 60, 62.3, 62.5, 63, 63.5, 63.7, 66,",
"56, 56.7, 60, 62.3, 62.5, 63, 63.5, 63.7, 66, 96.5, 97.3, 97.5, 98,",
"ax3 = fig.add_subplot(1, 2, 1) ax4 = fig.add_subplot(1, 2, 2) ax3.plot(Y, X[:, 2])",
"[80, 4.5, 6, 300], [79, 4.5, 6, 300], [81, 4.5, 6, 300], [88,",
"4, 5, 550], [31.2, 4, 5, 450], [63, 5, 3, 550], [63, 4,",
"Y) w = np.dot(np.linalg.pinv(A), b) w_0 = w[0][0] w_1 = w[1][0] w_2 =",
"đoán giá các mẫu nhà') dt_name = st.sidebar.text_input('Nhập diện tích đất(m2) ') cd_name",
"[88, 5.5, 4, 850], [80, 5.5, 7, 300], [63, 6, 4, 250], [62,",
"5.5, 4, 500], [140, 5.5, 5, 500], [140, 5.5, 6, 500], [141, 5.5,",
"35.5, 35.7, 42.5, 42.9, 43, 43.463, 43.5, 43.537, 44.1, 50, 52.3, 53, 53.38,",
"ax2.plot(Y, X[:, 1]) ax2.set_title('xét số mét mặt tiền với giá tiền') ax2.set_xlabel('giá tiền')",
"đất(m2) ') cd_name = st.sidebar.text_input('Nhập chiều dài mặt tiền(m) ') tn_name = st.sidebar.text_input('Nhập",
"31.63, 31.7, 32, 32, 32, 32, 32, 32.3, 32.3, 32.37, 32.4, 32.5, 32.65,",
"X[:, 1]) ax2.set_title('xét số mét mặt tiền với giá tiền') ax2.set_xlabel('giá tiền') ax2.set_ylabel('số",
"8, 1100], [79, 5.8, 8, 1100], [80, 5.8, 9, 1100], [81, 5.8, 8,",
"# x4 là khoảng cách tới hồ gươm (m) X = np.array([[40, 8,",
"800], [86, 3.5, 4, 900], [87, 3.5, 3, 900], [30.2, 4, 4, 450],",
"7, 4, 250], [63, 7, 3, 250], [63, 7, 4, 250], [63, 7,",
"cd_name = st.sidebar.text_input('Nhập chiều dài mặt tiền(m) ') tn_name = st.sidebar.text_input('Nhập số tầng",
"9, 1100], [81, 5.8, 8, 1100], [80, 6.8, 8, 1100], [80, 3.5, 6,",
"tại hồ gươm ') # x1 là diện tích của lô đất(m2) #",
"32, 32, 32, 32, 32, 32.3, 32.3, 32.37, 32.4, 32.5, 32.65, 32.7, 33,",
"1100], [80, 3.5, 6, 300], [80, 4.5, 5, 300], [80, 4.5, 6, 300],",
"53.38, 53.62, 54, 54.5, 55, 55.46, 55.5, 55.54, 56, 56.7, 60, 62.3, 62.5,",
"st.title('Mô hình dự đoán giá nhà đất tại hồ gươm ') # x1",
"[61, 6, 1, 800], [62, 5, 1, 800], [85, 4, 6, 950], [84,",
"1, 800], [63, 6, 1, 800], [31.2, 4, 4, 450], [31.2, 4, 3,",
"22, 22.5, 29, 30, 30.5, 30.5, 30.8, 31, 31, 31, 31, 31.3, 31.35,",
"550], [63, 6, 4, 550], [80, 5.8, 7, 1100], [80, 4.8, 8, 1100],",
"w[0][0] w_1 = w[1][0] w_2 = w[2][0] w_3 = w[3][0] w_4 = w[4][0]",
"7, 4, 250], [63, 7, 5, 250], [64, 7, 4, 250], [63, 8,",
"= fig.add_subplot(1, 2, 1) ax2 = fig.add_subplot(1, 2, 2) ax1.plot(Y, X[:, 0]) ax1.set_title('xét",
"ax2.set_ylabel('số mét mặt tiền') return fig def duel_plot2(X4, X5, Y): fig = plt.figure(figsize=(15,",
"4, 5, 350], [45, 5, 4, 350], [45, 3, 4, 350], [60, 2.3,",
"550], [64, 4, 5, 550], [63, 5, 6 , 550], [63, 6, 4,",
"streamlit as st from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import train_test_split import",
"98, 98.5, 98.7, 99.5 ]]).T def duel_plot(X1, X2, Y): fig = plt.figure(figsize=(15, 5))",
"8, 1100], [80, 5.8, 9, 1100], [81, 5.8, 8, 1100], [80, 6.8, 8,",
"ax1 = fig.add_subplot(1, 2, 1) ax2 = fig.add_subplot(1, 2, 2) ax1.plot(Y, X[:, 0])",
"tn_name = st.sidebar.text_input('Nhập số tầng nhà(tầng) ') kc_name = st.sidebar.text_input('Nhập khoảng cách nhà",
"5, 5, 950], [85, 3.5, 3, 900], [86, 3.5, 2, 900], [31.2, 3,",
"4, 250], [62, 7, 4, 250], [63, 7, 3, 250], [63, 7, 4,",
"tiền') ax1.set_xlabel('giá tiền') ax1.set_ylabel('Diện tích m2') ax2.plot(Y, X[:, 1]) ax2.set_title('xét số mét mặt",
"as pd import numpy as np import matplotlib.pyplot as plt st.title('Mô hình dự",
"import mean_squared_error, r2_score from sklearn.model_selection import train_test_split import pandas as pd import numpy",
"4.5, 5, 450], [36, 4.5, 6, 450], [40, 9, 2, 1800], [36, 4.5,",
"6.5, 5, 500]]) Y = np.array([[ 19, 19.3, 19.45, 19.48, 19.5, 19.7, 20,",
"32, 32, 32, 32, 32.3, 32.3, 32.37, 32.4, 32.5, 32.65, 32.7, 33, 33,",
"43.5, 43.537, 44.1, 50, 52.3, 53, 53.38, 53.62, 54, 54.5, 55, 55.46, 55.5,",
"300], [80, 4.5, 6, 300], [79, 4.5, 6, 300], [81, 4.5, 6, 300],",
"5, 4, 450], [63, 5, 5, 550], [64, 4, 5, 550], [63, 5,",
"300], [63, 6, 4, 250], [62, 7, 4, 250], [63, 7, 3, 250],",
"5, 500], [139, 5.5, 5, 500], [140, 5.5, 4, 500], [140, 5.5, 5,",
"[59, 3.3, 5, 450], [60, 3.3, 4, 450], [85, 4, 4, 950], [85,",
"5, 450], [60, 3.3, 4, 450], [85, 4, 4, 950], [85, 4, 5,",
"21, 21.3, 21.5, 21.7, 22, 22.5, 29, 30, 30.5, 30.5, 30.8, 31, 31,",
"square) : \", r2_score(y_test, np.dot(x_test, w))) vd = np.array([dt_name, cd_name, tn_name, kc_name, 1])",
"20.5, 20.5, 20.52, 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22, 22.5, 29,",
"gươm ') # x1 là diện tích của lô đất(m2) # x2 là",
"nhà') ax4.plot(Y, X[:, 3]) ax4.set_title('xét khoảng cách với giá tiền') ax4.set_xlabel('giá tiền') ax4.set_ylabel('khoảng",
"20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22, 22.5, 29, 30, 30.5, 30.5,",
"giá tiền') ax4.set_xlabel('giá tiền') ax4.set_ylabel('khoảng cách tới hồ gươm') return fig st.set_option('deprecation.showPyplotGlobalUse', False)",
"20.7, 21, 21, 21.3, 21.5, 21.7, 22, 22.5, 29, 30, 30.5, 30.5, 30.8,",
"[80, 5.8, 8, 1100], [79, 5.8, 8, 1100], [80, 5.8, 9, 1100], [81,",
"3.3, 6, 450], [85, 5, 5, 950], [85, 3.5, 3, 900], [86, 3.5,",
"250], [63, 7, 5, 250], [64, 7, 4, 250], [63, 8, 4, 250],",
"mét mặt tiền với giá tiền') ax2.set_xlabel('giá tiền') ax2.set_ylabel('số mét mặt tiền') return",
"3]) ax4.set_title('xét khoảng cách với giá tiền') ax4.set_xlabel('giá tiền') ax4.set_ylabel('khoảng cách tới hồ",
"[63, 5, 6 , 550], [63, 6, 4, 550], [80, 5.8, 7, 1100],",
"[45, 4, 3, 350], [45, 4, 4, 350], [45, 4, 5, 350], [45,",
"1]) ax2.set_title('xét số mét mặt tiền với giá tiền') ax2.set_xlabel('giá tiền') ax2.set_ylabel('số mét",
"ax1.set_xlabel('giá tiền') ax1.set_ylabel('Diện tích m2') ax2.plot(Y, X[:, 1]) ax2.set_title('xét số mét mặt tiền",
"[79, 5.8, 8, 1100], [80, 5.8, 9, 1100], [81, 5.8, 8, 1100], [80,",
"33, 33, 33.5, 33.5, 33.6, 34, 34, 34.3, 34.6, 35, 35, 35, 35.5,",
"900], [86, 6, 5, 950], [60, 4.3, 5, 450], [62, 7, 1, 800],",
"số mét mặt tiền với giá tiền') ax2.set_xlabel('giá tiền') ax2.set_ylabel('số mét mặt tiền')",
"mẫu nhà') dt_name = st.sidebar.text_input('Nhập diện tích đất(m2) ') cd_name = st.sidebar.text_input('Nhập chiều",
"22.5, 29, 30, 30.5, 30.5, 30.8, 31, 31, 31, 31, 31.3, 31.35, 31.5,",
"Xbar) b = np.dot(Xbar.T, Y) w = np.dot(np.linalg.pinv(A), b) w_0 = w[0][0] w_1",
"float(tn_name)+w_4*float(kc_name) + w_0 st.sidebar.write('Giá của ngôi nhà là : ', y1, 'tỷ đồng')",
"unicode_literals import streamlit as st from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import",
"ax3.set_ylabel('số tầng nhà') ax4.plot(Y, X[:, 3]) ax4.set_title('xét khoảng cách với giá tiền') ax4.set_xlabel('giá",
"98.5, 98.7, 99.5 ]]).T def duel_plot(X1, X2, Y): fig = plt.figure(figsize=(15, 5)) ax1",
"w[3][0] w_4 = w[4][0] st.write(\"Độ chính xác (R2 square) : \", r2_score(y_test, np.dot(x_test,",
"[45, 3, 4, 350], [60, 2.3, 5, 450], [59, 3.3, 5, 450], [60,",
"500], [141, 5.5, 5, 500], [140, 6.5, 5, 500]]) Y = np.array([[ 19,",
"33.5, 33.5, 33.6, 34, 34, 34.3, 34.6, 35, 35, 35, 35.5, 35.7, 42.5,",
"[80, 4.8, 8, 1100], [80, 5.8, 8, 1100], [79, 5.8, 8, 1100], [80,",
"1100], [80, 4.8, 8, 1100], [80, 5.8, 8, 1100], [79, 5.8, 8, 1100],",
"450], [39, 9, 2, 1800], [40, 9, 1, 1800], [36, 4.5, 5, 450],",
"X[:, 3], Y)) st.sidebar.title('Dự đoán giá các mẫu nhà') dt_name = st.sidebar.text_input('Nhập diện",
"6, 450], [40, 9, 2, 1800], [36, 4.5, 7, 450], [40, 9, 3,",
"3.5, 3, 900], [30.2, 4, 4, 450], [62, 6, 3, 800], [86, 4.5,",
"= w[2][0] w_3 = w[3][0] w_4 = w[4][0] st.write(\"Độ chính xác (R2 square)",
"5.5, 6, 500], [141, 5.5, 5, 500], [140, 6.5, 5, 500]]) Y =",
"[37, 4.5, 6, 450], [36, 5.5, 6, 450], [40, 10, 2, 1800], [45,",
"4.5, 5, 300], [80, 4.5, 6, 300], [79, 4.5, 6, 300], [81, 4.5,",
"19.48, 19.5, 19.7, 20, 20, 20.3, 20.5, 20.5, 20.52, 20.55, 20.7, 21, 21,",
", 4, 450], [31.2, 5, 4, 450], [63, 5, 5, 550], [64, 4,",
"450], [40, 9, 2, 1800], [36, 4.5, 7, 450], [40, 9, 3, 1800],",
"Y, test_size=0.2) A = np.dot(Xbar.T, Xbar) b = np.dot(Xbar.T, Y) w = np.dot(np.linalg.pinv(A),",
"as np import matplotlib.pyplot as plt st.title('Mô hình dự đoán giá nhà đất",
"550], [31.2, 4, 5, 450], [63, 5, 3, 550], [63, 4, 5, 550],",
"3.3, 5, 450], [61, 6, 1, 800], [62, 5, 1, 800], [85, 4,",
"= w[3][0] w_4 = w[4][0] st.write(\"Độ chính xác (R2 square) : \", r2_score(y_test,",
"42.9, 43, 43.463, 43.5, 43.537, 44.1, 50, 52.3, 53, 53.38, 53.62, 54, 54.5,",
"2, 1800], [40, 9, 1, 1800], [36, 4.5, 5, 450], [36, 4.5, 6,",
"b) w_0 = w[0][0] w_1 = w[1][0] w_2 = w[2][0] w_3 = w[3][0]",
"4, 450], [61, 3.3, 5, 450], [62, 6, 1, 800], [85, 6, 5,",
"lô đất(m2) # x2 là chiều dài mặt tiền (m) # x3 là",
"tầng nhà(tầng) ') kc_name = st.sidebar.text_input('Nhập khoảng cách nhà tới hồ gươm(m) ')",
"[40, 10, 2, 1800], [45, 3, 4, 350], [45, 4, 3, 350], [45,",
"33.5, 33.6, 34, 34, 34.3, 34.6, 35, 35, 35, 35.5, 35.7, 42.5, 42.9,",
"1, 800], [85, 6, 5, 950], [86, 3.5, 3, 900], [62, 6, 2,",
"[88, 4.5, 5, 850], [89, 4.5, 4, 850], [88, 5.5, 4, 850], [80,",
"2, 800], [86, 3.5, 4, 900], [87, 3.5, 3, 900], [30.2, 4, 4,",
"950], [86, 2.5, 3, 900], [60, 3.3, 6, 450], [85, 5, 5, 950],",
"450], [40, 9, 3, 1800], [44, 4, 5, 350], [41, 9, 2, 1800],",
"A = np.dot(Xbar.T, Xbar) b = np.dot(Xbar.T, Y) w = np.dot(np.linalg.pinv(A), b) w_0",
"khoảng cách với giá tiền') ax4.set_xlabel('giá tiền') ax4.set_ylabel('khoảng cách tới hồ gươm') return",
"5, 850], [89, 4.5, 4, 850], [88, 5.5, 4, 850], [80, 5.5, 7,",
"tiền') ax4.set_xlabel('giá tiền') ax4.set_ylabel('khoảng cách tới hồ gươm') return fig st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(duel_plot(X[:,",
"6, 3, 800], [86, 4.5, 3, 900], [86, 6, 5, 950], [60, 4.3,",
"8, 4, 250], [140, 4.5, 5, 500], [139, 5.5, 5, 500], [140, 5.5,",
"= w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \\ float(tn_name)+w_4*float(kc_name) + w_0 st.sidebar.write('Giá của ngôi nhà là :",
"[85, 5, 5, 950], [85, 3.5, 3, 900], [86, 3.5, 2, 900], [31.2,",
"6.8, 8, 1100], [80, 3.5, 6, 300], [80, 4.5, 5, 300], [80, 4.5,",
"[140, 6.5, 5, 500]]) Y = np.array([[ 19, 19.3, 19.45, 19.48, 19.5, 19.7,",
"30.5, 30.5, 30.8, 31, 31, 31, 31, 31.3, 31.35, 31.5, 31.5, 31.63, 31.7,",
"32.5, 32.65, 32.7, 33, 33, 33, 33.5, 33.5, 33.6, 34, 34, 34.3, 34.6,",
"cách nhà tới hồ gươm(m) ') one = np.ones((X.shape[0], 1)) Xbar = np.concatenate((one,",
"35, 35, 35.5, 35.7, 42.5, 42.9, 43, 43.463, 43.5, 43.537, 44.1, 50, 52.3,",
"[31.2, 5, 4, 450], [63, 5, 5, 550], [64, 4, 5, 550], [63,",
"4.5, 3, 850], [88, 4.5, 4, 850], [87, 4.5, 4, 850], [88, 4.5,",
"import numpy as np import matplotlib.pyplot as plt st.title('Mô hình dự đoán giá",
"300], [79, 4.5, 6, 300], [81, 4.5, 6, 300], [88, 3.5, 4, 850],",
"y_test = train_test_split(Xbar, Y, test_size=0.2) A = np.dot(Xbar.T, Xbar) b = np.dot(Xbar.T, Y)",
"4, 4, 450], [31.2, 4, 3, 450], [62, 4, 5, 550], [31.2, 4,",
"8, 2, 1800], [36, 3.5, 6, 450], [35, 4.5, 6, 450], [39, 9,",
"axis=1) x_train, x_test, y_train, y_test = train_test_split(Xbar, Y, test_size=0.2) A = np.dot(Xbar.T, Xbar)",
"3], Y)) st.sidebar.title('Dự đoán giá các mẫu nhà') dt_name = st.sidebar.text_input('Nhập diện tích",
"dài mặt tiền(m) ') tn_name = st.sidebar.text_input('Nhập số tầng nhà(tầng) ') kc_name =",
"9, 2, 1800], [37, 4.5, 6, 450], [36, 5.5, 6, 450], [40, 10,",
"[80, 3.5, 6, 300], [80, 4.5, 5, 300], [80, 4.5, 6, 300], [79,",
"5, 950], [60, 4.3, 5, 450], [62, 7, 1, 800], [63, 6, 1,",
"là diện tích của lô đất(m2) # x2 là chiều dài mặt tiền",
"4, 5, 350], [41, 9, 2, 1800], [37, 4.5, 6, 450], [36, 5.5,",
"as plt st.title('Mô hình dự đoán giá nhà đất tại hồ gươm ')",
"x_train, x_test, y_train, y_test = train_test_split(Xbar, Y, test_size=0.2) A = np.dot(Xbar.T, Xbar) b",
"print_function, unicode_literals import streamlit as st from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection",
"250], [64, 7, 4, 250], [63, 8, 4, 250], [140, 4.5, 5, 500],",
"X[:, 1], Y)) st.pyplot(duel_plot2(X[:, 2], X[:, 3], Y)) st.sidebar.title('Dự đoán giá các mẫu",
"31.7, 32, 32, 32, 32, 32, 32.3, 32.3, 32.37, 32.4, 32.5, 32.65, 32.7,",
"w[2][0] w_3 = w[3][0] w_4 = w[4][0] st.write(\"Độ chính xác (R2 square) :",
"[62, 5, 1, 800], [85, 4, 6, 950], [84, 6, 5, 950], [86,",
"ax1.set_ylabel('Diện tích m2') ax2.plot(Y, X[:, 1]) ax2.set_title('xét số mét mặt tiền với giá",
"800], [63, 6, 1, 800], [31.2, 4, 4, 450], [31.2, 4, 3, 450],",
"fig.add_subplot(1, 2, 2) ax1.plot(Y, X[:, 0]) ax1.set_title('xét diện tích với giá tiền') ax1.set_xlabel('giá",
"4, 4, 450], [62, 6, 3, 800], [86, 4.5, 3, 900], [86, 6,",
"850], [80, 5.5, 7, 300], [63, 6, 4, 250], [62, 7, 4, 250],",
"np.array([[ 19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5, 20.5, 20.52,",
"7, 450], [40, 9, 3, 1800], [44, 4, 5, 350], [41, 9, 2,",
"return fig st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(duel_plot(X[:, 0], X[:, 1], Y)) st.pyplot(duel_plot2(X[:, 2], X[:, 3],",
"4.5, 6, 450], [40, 9, 2, 1800], [36, 4.5, 7, 450], [40, 9,",
"np.dot(Xbar.T, Y) w = np.dot(np.linalg.pinv(A), b) w_0 = w[0][0] w_1 = w[1][0] w_2",
"4, 5, 550], [32.2, 4 , 4, 450], [31.2, 5, 4, 450], [63,",
"hồ gươm ') # x1 là diện tích của lô đất(m2) # x2",
"31.5, 31.63, 31.7, 32, 32, 32, 32, 32, 32.3, 32.3, 32.37, 32.4, 32.5,",
"* \\ float(tn_name)+w_4*float(kc_name) + w_0 st.sidebar.write('Giá của ngôi nhà là : ', y1,",
"7, 3, 250], [63, 7, 4, 250], [63, 7, 5, 250], [64, 7,",
"1], Y)) st.pyplot(duel_plot2(X[:, 2], X[:, 3], Y)) st.sidebar.title('Dự đoán giá các mẫu nhà')",
"4, 900], [87, 3.5, 3, 900], [30.2, 4, 4, 450], [62, 6, 3,",
"5, 5, 550], [64, 4, 5, 550], [63, 5, 6 , 550], [63,",
"19.5, 19.7, 20, 20, 20.3, 20.5, 20.5, 20.52, 20.55, 20.7, 21, 21, 21.3,",
"53, 53.38, 53.62, 54, 54.5, 55, 55.46, 55.5, 55.54, 56, 56.7, 60, 62.3,",
"5, 450], [59, 3.3, 5, 450], [60, 3.3, 4, 450], [85, 4, 4,",
"30.5, 30.8, 31, 31, 31, 31, 31.3, 31.35, 31.5, 31.5, 31.63, 31.7, 32,",
"55.54, 56, 56.7, 60, 62.3, 62.5, 63, 63.5, 63.7, 66, 96.5, 97.3, 97.5,",
"= np.concatenate((one, X), axis=1) x_train, x_test, y_train, y_test = train_test_split(Xbar, Y, test_size=0.2) A",
"(R2 square) : \", r2_score(y_test, np.dot(x_test, w))) vd = np.array([dt_name, cd_name, tn_name, kc_name,",
"500], [140, 5.5, 5, 500], [140, 5.5, 6, 500], [141, 5.5, 5, 500],",
"31, 31, 31.3, 31.35, 31.5, 31.5, 31.63, 31.7, 32, 32, 32, 32, 32,",
"43.463, 43.5, 43.537, 44.1, 50, 52.3, 53, 53.38, 53.62, 54, 54.5, 55, 55.46,",
"3.5, 4, 850], [88, 4.5, 3, 850], [88, 4.5, 4, 850], [87, 4.5,",
"fig.add_subplot(1, 2, 2) ax3.plot(Y, X[:, 2]) ax3.set_title('xét số tầng nhà với giá tiền')",
"pd import numpy as np import matplotlib.pyplot as plt st.title('Mô hình dự đoán",
"[63, 6, 1, 800], [31.2, 4, 4, 450], [31.2, 4, 3, 450], [62,",
"5, 250], [64, 7, 4, 250], [63, 8, 4, 250], [140, 4.5, 5,",
"[81, 5.8, 8, 1100], [80, 6.8, 8, 1100], [80, 3.5, 6, 300], [80,",
"4, 4, 950], [85, 4, 5, 950], [60, 3.3, 5, 450], [61, 6,",
"test_size=0.2) A = np.dot(Xbar.T, Xbar) b = np.dot(Xbar.T, Y) w = np.dot(np.linalg.pinv(A), b)",
"w = np.dot(np.linalg.pinv(A), b) w_0 = w[0][0] w_1 = w[1][0] w_2 = w[2][0]",
"21.3, 21.5, 21.7, 22, 22.5, 29, 30, 30.5, 30.5, 30.8, 31, 31, 31,",
"54, 54.5, 55, 55.46, 55.5, 55.54, 56, 56.7, 60, 62.3, 62.5, 63, 63.5,",
"850], [89, 4.5, 4, 850], [88, 5.5, 4, 850], [80, 5.5, 7, 300],",
"giá tiền') ax3.set_xlabel('giá tiền') ax3.set_ylabel('số tầng nhà') ax4.plot(Y, X[:, 3]) ax4.set_title('xét khoảng cách",
"500]]) Y = np.array([[ 19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3,",
"import streamlit as st from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import train_test_split",
"dt_name = st.sidebar.text_input('Nhập diện tích đất(m2) ') cd_name = st.sidebar.text_input('Nhập chiều dài mặt",
"1100], [79, 5.8, 8, 1100], [80, 5.8, 9, 1100], [81, 5.8, 8, 1100],",
"0], X[:, 1], Y)) st.pyplot(duel_plot2(X[:, 2], X[:, 3], Y)) st.sidebar.title('Dự đoán giá các",
"4.8, 8, 1100], [80, 5.8, 8, 1100], [79, 5.8, 8, 1100], [80, 5.8,",
"[44, 4, 5, 350], [41, 9, 2, 1800], [37, 4.5, 6, 450], [36,",
"5, 1, 800], [85, 4, 6, 950], [84, 6, 5, 950], [86, 2.5,",
"550], [80, 5.8, 7, 1100], [80, 4.8, 8, 1100], [80, 5.8, 8, 1100],",
"8, 1100], [80, 6.8, 8, 1100], [80, 3.5, 6, 300], [80, 4.5, 5,",
"X5, Y): fig = plt.figure(figsize=(15, 5)) ax3 = fig.add_subplot(1, 2, 1) ax4 =",
"98.7, 99.5 ]]).T def duel_plot(X1, X2, Y): fig = plt.figure(figsize=(15, 5)) ax1 =",
"5, 350], [45, 5, 4, 350], [45, 3, 4, 350], [60, 2.3, 5,",
"[86, 3.5, 2, 900], [31.2, 3, 4, 450], [61, 3.3, 5, 450], [62,",
"import matplotlib.pyplot as plt st.title('Mô hình dự đoán giá nhà đất tại hồ",
"= fig.add_subplot(1, 2, 2) ax1.plot(Y, X[:, 0]) ax1.set_title('xét diện tích với giá tiền')",
"4, 3, 350], [45, 4, 4, 350], [45, 4, 5, 350], [45, 5,",
"st.pyplot(duel_plot2(X[:, 2], X[:, 3], Y)) st.sidebar.title('Dự đoán giá các mẫu nhà') dt_name =",
"import train_test_split import pandas as pd import numpy as np import matplotlib.pyplot as",
"[85, 4, 5, 950], [60, 3.3, 5, 450], [61, 6, 1, 800], [62,",
"4, 350], [45, 4, 3, 350], [45, 4, 4, 350], [45, 4, 5,",
"19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5, 20.5, 20.52, 20.55, 20.7, 21,",
"900], [87, 3.5, 3, 900], [30.2, 4, 4, 450], [62, 6, 3, 800],",
"st from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import train_test_split import pandas as",
"gươm(m) ') one = np.ones((X.shape[0], 1)) Xbar = np.concatenate((one, X), axis=1) x_train, x_test,",
"= np.dot(Xbar.T, Xbar) b = np.dot(Xbar.T, Y) w = np.dot(np.linalg.pinv(A), b) w_0 =",
"6, 300], [88, 3.5, 4, 850], [88, 4.5, 3, 850], [88, 4.5, 4,",
"5)) ax3 = fig.add_subplot(1, 2, 1) ax4 = fig.add_subplot(1, 2, 2) ax3.plot(Y, X[:,",
"= np.ones((X.shape[0], 1)) Xbar = np.concatenate((one, X), axis=1) x_train, x_test, y_train, y_test =",
"1100], [80, 5.8, 9, 1100], [81, 5.8, 8, 1100], [80, 6.8, 8, 1100],",
"20.52, 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22, 22.5, 29, 30, 30.5,",
"33.6, 34, 34, 34.3, 34.6, 35, 35, 35, 35.5, 35.7, 42.5, 42.9, 43,",
"w[1][0] w_2 = w[2][0] w_3 = w[3][0] w_4 = w[4][0] st.write(\"Độ chính xác",
"5, 350], [41, 9, 2, 1800], [37, 4.5, 6, 450], [36, 5.5, 6,",
"= fig.add_subplot(1, 2, 2) ax3.plot(Y, X[:, 2]) ax3.set_title('xét số tầng nhà với giá",
"850], [88, 4.5, 4, 850], [87, 4.5, 4, 850], [88, 4.5, 5, 850],",
"sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import train_test_split import pandas as pd import",
"ax3.set_xlabel('giá tiền') ax3.set_ylabel('số tầng nhà') ax4.plot(Y, X[:, 3]) ax4.set_title('xét khoảng cách với giá",
"4, 350], [60, 2.3, 5, 450], [59, 3.3, 5, 450], [60, 3.3, 4,",
"cách tới hồ gươm') return fig st.set_option('deprecation.showPyplotGlobalUse', False) st.pyplot(duel_plot(X[:, 0], X[:, 1], Y))",
"1100], [80, 6.8, 8, 1100], [80, 3.5, 6, 300], [80, 4.5, 5, 300],",
"21.7, 22, 22.5, 29, 30, 30.5, 30.5, 30.8, 31, 31, 31, 31, 31.3,",
"cách với giá tiền') ax4.set_xlabel('giá tiền') ax4.set_ylabel('khoảng cách tới hồ gươm') return fig",
"= st.sidebar.text_input('Nhập khoảng cách nhà tới hồ gươm(m) ') one = np.ones((X.shape[0], 1))",
"950], [60, 4.3, 5, 450], [62, 7, 1, 800], [63, 6, 1, 800],",
"duel_plot(X1, X2, Y): fig = plt.figure(figsize=(15, 5)) ax1 = fig.add_subplot(1, 2, 1) ax2",
"st.sidebar.button('Dự đoán'): y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \\ float(tn_name)+w_4*float(kc_name) + w_0 st.sidebar.write('Giá của ngôi",
"]]).T def duel_plot(X1, X2, Y): fig = plt.figure(figsize=(15, 5)) ax1 = fig.add_subplot(1, 2,",
"[60, 4.3, 5, 450], [62, 7, 1, 800], [63, 6, 1, 800], [31.2,",
"đoán'): y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \\ float(tn_name)+w_4*float(kc_name) + w_0 st.sidebar.write('Giá của ngôi nhà",
"[62, 4, 5, 550], [31.2, 4, 5, 450], [63, 5, 3, 550], [63,",
"fig.add_subplot(1, 2, 1) ax4 = fig.add_subplot(1, 2, 2) ax3.plot(Y, X[:, 2]) ax3.set_title('xét số",
"5, 950], [86, 3.5, 3, 900], [62, 6, 2, 800], [86, 3.5, 4,",
"63, 63.5, 63.7, 66, 96.5, 97.3, 97.5, 98, 98.5, 98.7, 99.5 ]]).T def",
"2], X[:, 3], Y)) st.sidebar.title('Dự đoán giá các mẫu nhà') dt_name = st.sidebar.text_input('Nhập",
"3, 4, 450], [61, 3.3, 5, 450], [62, 6, 1, 800], [85, 6,",
"550], [32.2, 4 , 4, 450], [31.2, 5, 4, 450], [63, 5, 5,",
"4, 250], [63, 8, 4, 250], [140, 4.5, 5, 500], [139, 5.5, 5,",
"6, 1, 800], [31.2, 4, 4, 450], [31.2, 4, 3, 450], [62, 4,",
"one = np.ones((X.shape[0], 1)) Xbar = np.concatenate((one, X), axis=1) x_train, x_test, y_train, y_test",
": \", r2_score(y_test, np.dot(x_test, w))) vd = np.array([dt_name, cd_name, tn_name, kc_name, 1]) if",
"ax3.set_title('xét số tầng nhà với giá tiền') ax3.set_xlabel('giá tiền') ax3.set_ylabel('số tầng nhà') ax4.plot(Y,",
"6, 2, 800], [86, 3.5, 4, 900], [87, 3.5, 3, 900], [30.2, 4,",
"4, 250], [140, 4.5, 5, 500], [139, 5.5, 5, 500], [140, 5.5, 4,",
"800], [31.2, 4, 4, 450], [31.2, 4, 3, 450], [62, 4, 5, 550],",
"x1 là diện tích của lô đất(m2) # x2 là chiều dài mặt",
"với giá tiền') ax4.set_xlabel('giá tiền') ax4.set_ylabel('khoảng cách tới hồ gươm') return fig st.set_option('deprecation.showPyplotGlobalUse',",
"5, 450], [61, 6, 1, 800], [62, 5, 1, 800], [85, 4, 6,",
"450], [63, 5, 5, 550], [64, 4, 5, 550], [63, 5, 6 ,",
"2, 2) ax3.plot(Y, X[:, 2]) ax3.set_title('xét số tầng nhà với giá tiền') ax3.set_xlabel('giá",
"= st.sidebar.text_input('Nhập diện tích đất(m2) ') cd_name = st.sidebar.text_input('Nhập chiều dài mặt tiền(m)",
"cách tới hồ gươm (m) X = np.array([[40, 8, 2, 1800], [36, 3.5,",
"6, 1, 800], [85, 6, 5, 950], [86, 3.5, 3, 900], [62, 6,",
"34, 34.3, 34.6, 35, 35, 35, 35.5, 35.7, 42.5, 42.9, 43, 43.463, 43.5,",
"42.5, 42.9, 43, 43.463, 43.5, 43.537, 44.1, 50, 52.3, 53, 53.38, 53.62, 54,",
"3.5, 4, 900], [87, 3.5, 3, 900], [30.2, 4, 4, 450], [62, 6,",
"tích của lô đất(m2) # x2 là chiều dài mặt tiền (m) #",
"3, 550], [63, 4, 5, 550], [32.2, 4 , 4, 450], [31.2, 5,",
"plt st.title('Mô hình dự đoán giá nhà đất tại hồ gươm ') #",
"2, 1800], [36, 3.5, 6, 450], [35, 4.5, 6, 450], [39, 9, 2,",
"43.537, 44.1, 50, 52.3, 53, 53.38, 53.62, 54, 54.5, 55, 55.46, 55.5, 55.54,",
"m2') ax2.plot(Y, X[:, 1]) ax2.set_title('xét số mét mặt tiền với giá tiền') ax2.set_xlabel('giá",
"35, 35, 35, 35.5, 35.7, 42.5, 42.9, 43, 43.463, 43.5, 43.537, 44.1, 50,"
] |
[
"del_expect = [11017, 11030] # Tests bit = bitarray.bitarray(11030 + 1) big_dict =",
"'25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11010]",
"#end def def test_run_toBig_abthr_25_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',",
"11010, 11013, 11023] ins_expect = [11005, 11022] del_expect = [11017, 11030] # Tests",
"'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run and Tests with pytest.raises(Exception) as e: assert main_toBig(args)",
"granite.lib.shared_functions import * ################################################################# # Tests ################################################################# def test_run_toBig_rdthr_2_all(): ''' ''' # Variables",
"Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '4', 'rdthr':",
"= [11005, 11022] del_expect = [11017, 11030] # Tests bit = bitarray.bitarray(11030 +",
"# Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '4',",
"# Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr':",
"assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_all(): ''' '''",
"= [11007, 11010, 11013] ins_expect = [11022] del_expect = [11030] # Tests bit",
"Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_15_all(): ''' ''' # Variables args = {'file':",
"Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_2(): ''' ''' # Variables args = {'file':",
"== bit # Clean os.remove('tests/files/main_test.out') #end def ################################################################# # Errors ################################################################# def test_run_toBig_rdthr_2_all_miss_pos():",
"Run and Tests with pytest.raises(Exception) as e: assert main_toBig(args) assert str(e.value) == '\\nERROR",
"main_toBig(args) # Expected snv_expect = [11007] ins_expect = [] del_expect = [] #",
"big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_15_all(): ''' ''' #",
"Tests with pytest.raises(Exception) as e: assert main_toBig(args) assert str(e.value) == '\\nERROR in file:",
"pytest import bitarray from granite.toBig import ( main as main_toBig ) from granite.lib.shared_functions",
"''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3',",
"bit.setall(False) for i in ins_expect: bit[i] = True #end for assert big_dict['13_ins'][:11031] ==",
"= True #end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def",
"assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def ################################################################# # Errors #################################################################",
"################################################################# # Tests ################################################################# def test_run_toBig_rdthr_2_all(): ''' ''' # Variables args = {'file':",
"i in ins_expect: bit[i] = True #end for assert big_dict['13_ins'][:11031] == bit #",
"main_toBig(args) # Expected snv_expect = [11007, 11010] ins_expect = [] del_expect = []",
"None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = []",
"'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11001, 11007, 11010] ins_expect =",
"# Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_all(): ''' ''' # Variables args =",
"'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} #",
"[11007] ins_expect = [] del_expect = [] # Tests bit = bitarray.bitarray(11030 +",
"#end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_all():",
"{'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': None, 'ncores': '1', 'abthr':",
"= {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None,",
"assert str(e.value) == '\\nERROR in file: position 13:11006 in file tests/files/input_toBig_miss_pos.rck.gz is not",
"Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_all(): ''' ''' # Variables args = {'file':",
"True #end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def #################################################################",
"with pytest.raises(Exception) as e: assert main_toBig(args) assert str(e.value) == '\\nERROR in file: position",
"== bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_2(): ''' ''' # Variables",
"'2', 'rdthr': None, 'ncores': '1', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run",
"#end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_2():",
"11022] del_expect = [11017, 11030] # Tests bit = bitarray.bitarray(11030 + 1) big_dict",
") from granite.lib.shared_functions import * ################################################################# # Tests ################################################################# def test_run_toBig_rdthr_2_all(): ''' '''",
"{'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '17', 'ncores': '1', 'abthr':",
"big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_1_single(): ''' ''' #",
"for assert big_dict['13_ins'][:11031] == bit # Check del bit.setall(False) for i in del_expect:",
"'rdthr': None, 'ncores': '1', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args)",
"# Check del bit.setall(False) for i in del_expect: bit[i] = True #end for",
"#end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2_single():",
"test_run_toBig_rdthr_2_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',",
"test_run_toBig_rdthr_2_2_single(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2',",
"snv_expect = [11010] ins_expect = [] del_expect = [] # Tests bit =",
"def def test_run_toBig_abthr_15_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'],",
"import * ################################################################# # Tests ################################################################# def test_run_toBig_rdthr_2_all(): ''' ''' # Variables args",
"'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007, 11010, 11013] ins_expect =",
"# Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_1_single(): ''' ''' # Variables args =",
"= {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores': '2',",
"= True #end for assert big_dict['13_snv'][:11031] == bit # Check ins bit.setall(False) for",
"''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr':",
"import bitarray from granite.toBig import ( main as main_toBig ) from granite.lib.shared_functions import",
"== '\\nERROR in file: position 13:11006 in file tests/files/input_toBig_miss_pos.rck.gz is not consistent with",
"'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'}",
"'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '4', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile':",
"for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_2(): '''",
"test_run_toBig_rdthr_17_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',",
"# Run main_toBig(args) # Expected snv_expect = [11001, 11002, 11007, 11010, 11013, 11023]",
"None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007]",
"for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2(): '''",
"'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run and",
"# Expected snv_expect = [11007, 11010, 11013] ins_expect = [11022] del_expect = [11030]",
"''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '1', 'rdthr': '2',",
"= {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': None, 'ncores': '1',",
"'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions',",
"= [] del_expect = [] # Tests bit = bitarray.bitarray(11030 + 1) big_dict",
"bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_2(): ''' ''' # Variables args",
"Expected snv_expect = [] ins_expect = [] del_expect = [] # Tests bit",
"'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [] ins_expect = [] del_expect",
"'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile':",
"os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz',",
"Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2_single(): ''' ''' # Variables args = {'file':",
"os.remove('tests/files/main_test.out') #end def ################################################################# # Errors ################################################################# def test_run_toBig_rdthr_2_all_miss_pos(): ''' ''' # Variables",
"'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007, 11010, 11013]",
"del bit.setall(False) for i in del_expect: bit[i] = True #end for assert big_dict['13_del'][:11031]",
"from granite.toBig import ( main as main_toBig ) from granite.lib.shared_functions import * #################################################################",
"'25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007,",
"test_run_toBig_rdthr_2_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',",
"''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2',",
"'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile':",
"'outputfile': 'tests/files/main_test.out', 'fithr': '1', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile':",
"def test_run_toBig_rdthr_17_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile':",
"'3', 'rdthr': None, 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run",
"as e: assert main_toBig(args) assert str(e.value) == '\\nERROR in file: position 13:11006 in",
"11010] ins_expect = [] del_expect = [] # Tests bit = bitarray.bitarray(11030 +",
"big_dict['13_snv'][:11031] == bit # Check ins bit.setall(False) for i in ins_expect: bit[i] =",
"'fithr': '3', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} #",
"def def test_run_toBig_abthr_25_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'],",
"def test_run_toBig_abthr_25_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile':",
"assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2(): ''' '''",
"load_big('tests/files/main_test.out') # Check snv bit.setall(False) for i in snv_expect: bit[i] = True #end",
"#end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_1_single():",
"main_toBig(args) # Expected snv_expect = [11007, 11010, 11013] ins_expect = [11022] del_expect =",
"os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_15_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz',",
"'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '17', 'ncores': '1', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile':",
"1) big_dict = load_big('tests/files/main_test.out') # Check snv bit.setall(False) for i in snv_expect: bit[i]",
"big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2_single(): ''' ''' #",
"#end def def test_run_toBig_rdthr_2_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',",
"'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile':",
"def test_run_toBig_rdthr_2_all_miss_pos(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'],",
"'2', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run",
"'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [] ins_expect = []",
"'fithr': '1', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} #",
"= {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '17', 'ncores': None,",
"'tests/files/main_test.out', 'fithr': '4', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'}",
"= [11007, 11010] ins_expect = [] del_expect = [] # Tests bit =",
"True #end for assert big_dict['13_snv'][:11031] == bit # Check ins bit.setall(False) for i",
"== bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_all(): ''' ''' # Variables",
"pytest.raises(Exception) as e: assert main_toBig(args) assert str(e.value) == '\\nERROR in file: position 13:11006",
"'chromfile': 'tests/files/input_toBig.chrom.size'} # Run and Tests with pytest.raises(Exception) as e: assert main_toBig(args) assert",
"################################################################# # Errors ################################################################# def test_run_toBig_rdthr_2_all_miss_pos(): ''' ''' # Variables args = {'file':",
"assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_2(): ''' '''",
"'tests/files/main_test.out', 'fithr': '3', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'}",
"= {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '17', 'ncores': '1',",
"'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11010] ins_expect",
"snv_expect = [11001, 11002, 11007, 11010, 11013, 11023] ins_expect = [11005, 11022] del_expect",
"'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007, 11010] ins_expect",
"= {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '1', 'rdthr': '2', 'ncores': '2', 'abthr': None,",
"= [11001, 11002, 11007, 11010, 11013, 11023] ins_expect = [11005, 11022] del_expect =",
"#end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_all():",
"for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_all(): '''",
"def test_run_toBig_rdthr_2_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile':",
"os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz',",
"11013, 11023] ins_expect = [11005, 11022] del_expect = [11017, 11030] # Tests bit",
"+ 1) big_dict = load_big('tests/files/main_test.out') # Check snv bit.setall(False) for i in snv_expect:",
"as main_toBig ) from granite.lib.shared_functions import * ################################################################# # Tests ################################################################# def test_run_toBig_rdthr_2_all():",
"= [11001, 11007, 11010] ins_expect = [11005, 11022] del_expect = [11017, 11030] #",
"big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2(): ''' ''' #",
"Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '2',",
"bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_1_single(): ''' ''' # Variables args",
"# Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_2(): ''' ''' # Variables args =",
"= {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores': '2',",
"[11001, 11002, 11007, 11010, 11013, 11023] ins_expect = [11005, 11022] del_expect = [11017,",
"for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_1_single(): '''",
"[11001, 11007, 11010] ins_expect = [11005, 11022] del_expect = [11017, 11030] # Tests",
"'tests/files/main_test.out', 'fithr': '2', 'rdthr': '17', 'ncores': '1', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'}",
"Expected snv_expect = [11001, 11002, 11007, 11010, 11013, 11023] ins_expect = [11005, 11022]",
"'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '17', 'ncores': None, 'abthr': None, 'regionfile':",
"'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '17', 'ncores': '1', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions',",
"'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions',",
"''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out',",
"[11007, 11010, 11013] ins_expect = [11022] del_expect = [11030] # Tests bit =",
"args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '17', 'ncores':",
"'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11001, 11002,",
"assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_all(): ''' '''",
"'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': None, 'regionfile':",
"True #end for assert big_dict['13_ins'][:11031] == bit # Check del bit.setall(False) for i",
"Check del bit.setall(False) for i in del_expect: bit[i] = True #end for assert",
"'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '17', 'ncores': None, 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions',",
"( main as main_toBig ) from granite.lib.shared_functions import * ################################################################# # Tests #################################################################",
"big_dict = load_big('tests/files/main_test.out') # Check snv bit.setall(False) for i in snv_expect: bit[i] =",
"# Expected snv_expect = [] ins_expect = [] del_expect = [] # Tests",
"os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2_single(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'],",
"Libraries ################################################################# import sys, os import pytest import bitarray from granite.toBig import (",
"# Libraries ################################################################# import sys, os import pytest import bitarray from granite.toBig import",
"granite.toBig import ( main as main_toBig ) from granite.lib.shared_functions import * ################################################################# #",
"'fithr': '2', 'rdthr': '17', 'ncores': '1', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} #",
"args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr':",
"'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} #",
"os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz',",
"in file: position 13:11006 in file tests/files/input_toBig_miss_pos.rck.gz is not consistent with other files\\n'",
"def test_run_toBig_abthr_15_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile':",
"assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_2(): ''' '''",
"bit.setall(False) for i in del_expect: bit[i] = True #end for assert big_dict['13_del'][:11031] ==",
"# Expected snv_expect = [11001, 11002, 11007, 11010, 11013, 11023] ins_expect = [11005,",
"'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007, 11010,",
"args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '4', 'rdthr': '2',",
"Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': None,",
"'outputfile': 'tests/files/main_test.out', 'fithr': '4', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile':",
"bit[i] = True #end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end",
"# Expected snv_expect = [11007] ins_expect = [] del_expect = [] # Tests",
"#end def def test_run_toBig_rdthr_17_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',",
"Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores': '2',",
"'tests/files/main_test.out', 'fithr': '3', 'rdthr': '17', 'ncores': None, 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'}",
"# Run main_toBig(args) # Expected snv_expect = [11007] ins_expect = [] del_expect =",
"'tests/files/input_toBig.chrom.size'} # Run and Tests with pytest.raises(Exception) as e: assert main_toBig(args) assert str(e.value)",
"'ncores': '1', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected",
"'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11001, 11007, 11010]",
"'tests/files/main_test.out', 'fithr': '2', 'rdthr': None, 'ncores': '1', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'}",
"'1', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect",
"test_run_toBig_rdthr_17_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',",
"'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': '25', 'regionfile':",
"None, 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) #",
"'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': None, 'ncores': '1', 'abthr': '25', 'regionfile':",
"import pytest import bitarray from granite.toBig import ( main as main_toBig ) from",
"def def test_run_toBig_abthr_25_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'],",
"== bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_15_all(): ''' ''' # Variables",
"################################################################# def test_run_toBig_rdthr_2_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'],",
"[11030] # Tests bit = bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out') # Check",
"== bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_all(): ''' ''' # Variables",
"'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [] ins_expect =",
"Tests ################################################################# def test_run_toBig_rdthr_2_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',",
"bit.setall(False) for i in snv_expect: bit[i] = True #end for assert big_dict['13_snv'][:11031] ==",
"Expected snv_expect = [11010] ins_expect = [] del_expect = [] # Tests bit",
"# Expected snv_expect = [11001, 11007, 11010] ins_expect = [11005, 11022] del_expect =",
"== bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2(): ''' ''' # Variables",
"def test_run_toBig_rdthr_2_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile':",
"== bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_2(): ''' ''' # Variables",
"''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2',",
"and Tests with pytest.raises(Exception) as e: assert main_toBig(args) assert str(e.value) == '\\nERROR in",
"11007, 11010] ins_expect = [11005, 11022] del_expect = [11017, 11030] # Tests bit",
"#end def def test_run_toBig_rdthr_2_1_single(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile':",
"'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'}",
"['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': None, 'ncores': '1', 'abthr': '25',",
"[] ins_expect = [] del_expect = [] # Tests bit = bitarray.bitarray(11030 +",
"big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_all(): ''' ''' #",
"{'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '1', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile':",
"'rdthr': '17', 'ncores': '1', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args)",
"'17', 'ncores': '1', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) #",
"in ins_expect: bit[i] = True #end for assert big_dict['13_ins'][:11031] == bit # Check",
"11023] ins_expect = [11005, 11022] del_expect = [11017, 11030] # Tests bit =",
"Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_all(): ''' ''' # Variables args = {'file':",
"None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11001,",
"''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '1', 'rdthr':",
"['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '4', 'rdthr': '2', 'ncores': '2', 'abthr':",
"11010] ins_expect = [11005, 11022] del_expect = [11017, 11030] # Tests bit =",
"None, 'ncores': '2', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) #",
"'3', 'rdthr': '17', 'ncores': None, 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run",
"import ( main as main_toBig ) from granite.lib.shared_functions import * ################################################################# # Tests",
"Expected snv_expect = [11007, 11010] ins_expect = [] del_expect = [] # Tests",
"assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_1_single(): ''' '''",
"['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '17', 'ncores': None, 'abthr': None,",
"= [11010] ins_expect = [] del_expect = [] # Tests bit = bitarray.bitarray(11030",
"#end def ################################################################# # Errors ################################################################# def test_run_toBig_rdthr_2_all_miss_pos(): ''' ''' # Variables args",
"'rdthr': None, 'ncores': '2', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args)",
"Errors ################################################################# def test_run_toBig_rdthr_2_all_miss_pos(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',",
"'ncores': '1', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected",
"Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_2(): ''' ''' # Variables args = {'file':",
"'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007] ins_expect =",
"# Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2(): ''' ''' # Variables args =",
"################################################################# # Libraries ################################################################# import sys, os import pytest import bitarray from granite.toBig",
"sys, os import pytest import bitarray from granite.toBig import ( main as main_toBig",
"#end def def test_run_toBig_abthr_25_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',",
"del_expect: bit[i] = True #end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out')",
"bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2(): ''' ''' # Variables args",
"in snv_expect: bit[i] = True #end for assert big_dict['13_snv'][:11031] == bit # Check",
"Run main_toBig(args) # Expected snv_expect = [] ins_expect = [] del_expect = []",
"Expected snv_expect = [11007] ins_expect = [] del_expect = [] # Tests bit",
"= [] # Tests bit = bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out') #",
"'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions',",
"Run main_toBig(args) # Expected snv_expect = [11001, 11007, 11010] ins_expect = [11005, 11022]",
"big_dict['13_ins'][:11031] == bit # Check del bit.setall(False) for i in del_expect: bit[i] =",
"'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args)",
"Run main_toBig(args) # Expected snv_expect = [11001, 11002, 11007, 11010, 11013, 11023] ins_expect",
"'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} #",
"# Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '1', 'rdthr': '2', 'ncores':",
"file: position 13:11006 in file tests/files/input_toBig_miss_pos.rck.gz is not consistent with other files\\n' #end",
"ins_expect = [] del_expect = [] # Tests bit = bitarray.bitarray(11030 + 1)",
"None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run and Tests with pytest.raises(Exception) as e:",
"None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007,",
"['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': '25',",
"['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '2', 'ncores': '2', 'abthr': None,",
"bit # Clean os.remove('tests/files/main_test.out') #end def ################################################################# # Errors ################################################################# def test_run_toBig_rdthr_2_all_miss_pos(): '''",
"'tests/files/main_test.out', 'fithr': '1', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'}",
"'2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run and Tests",
"None, 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect",
"bit # Check del bit.setall(False) for i in del_expect: bit[i] = True #end",
"big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_2(): ''' ''' #",
"for assert big_dict['13_snv'][:11031] == bit # Check ins bit.setall(False) for i in ins_expect:",
"def test_run_toBig_abthr_25_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile':",
"= bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out') # Check snv bit.setall(False) for i",
"11013] ins_expect = [11022] del_expect = [11030] # Tests bit = bitarray.bitarray(11030 +",
"'2', 'rdthr': '17', 'ncores': '1', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run",
"args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': None, 'ncores':",
"== bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_1_single(): ''' ''' # Variables",
"assert main_toBig(args) assert str(e.value) == '\\nERROR in file: position 13:11006 in file tests/files/input_toBig_miss_pos.rck.gz",
"ins_expect: bit[i] = True #end for assert big_dict['13_ins'][:11031] == bit # Check del",
"bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out') # Check snv bit.setall(False) for i in",
"'rdthr': '17', 'ncores': None, 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args)",
"e: assert main_toBig(args) assert str(e.value) == '\\nERROR in file: position 13:11006 in file",
"args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores':",
"# Run main_toBig(args) # Expected snv_expect = [11007, 11010] ins_expect = [] del_expect",
"# Run main_toBig(args) # Expected snv_expect = [11001, 11007, 11010] ins_expect = [11005,",
"bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2_single(): ''' ''' # Variables args",
"################################################################# import sys, os import pytest import bitarray from granite.toBig import ( main",
"# Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores':",
"def test_run_toBig_rdthr_17_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile':",
"= [11017, 11030] # Tests bit = bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out')",
"{'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr':",
"#end def def test_run_toBig_rdthr_17_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',",
"main_toBig(args) assert str(e.value) == '\\nERROR in file: position 13:11006 in file tests/files/input_toBig_miss_pos.rck.gz is",
"# Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_15_all(): ''' ''' # Variables args =",
"{'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '17', 'ncores': None, 'abthr':",
"bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_15_all(): ''' ''' # Variables args",
"test_run_toBig_rdthr_2_1_single(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '1',",
"Run main_toBig(args) # Expected snv_expect = [11010] ins_expect = [] del_expect = []",
"#end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_15_all():",
"# Tests ################################################################# def test_run_toBig_rdthr_2_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz',",
"'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '4', 'rdthr': '2', 'ncores': '2', 'abthr': None,",
"str(e.value) == '\\nERROR in file: position 13:11006 in file tests/files/input_toBig_miss_pos.rck.gz is not consistent",
"for i in snv_expect: bit[i] = True #end for assert big_dict['13_snv'][:11031] == bit",
"main_toBig(args) # Expected snv_expect = [11001, 11007, 11010] ins_expect = [11005, 11022] del_expect",
"'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect =",
"'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007, 11010]",
"assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_15_all(): ''' '''",
"'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11010] ins_expect = []",
"# Errors ################################################################# def test_run_toBig_rdthr_2_all_miss_pos(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz',",
"test_run_toBig_abthr_15_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',",
"'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile':",
"Tests bit = bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out') # Check snv bit.setall(False)",
"[] # Tests bit = bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out') # Check",
"for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_2(): '''",
"Check snv bit.setall(False) for i in snv_expect: bit[i] = True #end for assert",
"Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '17',",
"'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11001, 11007,",
"def test_run_toBig_rdthr_2_2_single(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr':",
"'4', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run",
"#end for assert big_dict['13_snv'][:11031] == bit # Check ins bit.setall(False) for i in",
"# Run main_toBig(args) # Expected snv_expect = [11007, 11010, 11013] ins_expect = [11022]",
"test_run_toBig_abthr_25_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',",
"= [11030] # Tests bit = bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out') #",
"# Run main_toBig(args) # Expected snv_expect = [11010] ins_expect = [] del_expect =",
"'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007] ins_expect",
"assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2_single(): ''' '''",
"Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_1_single(): ''' ''' # Variables args = {'file':",
"'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '17', 'ncores': '1', 'abthr': None, 'regionfile':",
"[11007, 11010] ins_expect = [] del_expect = [] # Tests bit = bitarray.bitarray(11030",
"# Run main_toBig(args) # Expected snv_expect = [] ins_expect = [] del_expect =",
"'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run and Tests with pytest.raises(Exception) as e: assert",
"'17', 'ncores': None, 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) #",
"'\\nERROR in file: position 13:11006 in file tests/files/input_toBig_miss_pos.rck.gz is not consistent with other",
"'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '4', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions',",
"[11017, 11030] # Tests bit = bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out') #",
"ins bit.setall(False) for i in ins_expect: bit[i] = True #end for assert big_dict['13_ins'][:11031]",
"'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007, 11010] ins_expect = []",
"bitarray from granite.toBig import ( main as main_toBig ) from granite.lib.shared_functions import *",
"'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11001, 11002, 11007, 11010,",
"Run main_toBig(args) # Expected snv_expect = [11007, 11010] ins_expect = [] del_expect =",
"del_expect = [] # Tests bit = bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out')",
"# Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr':",
"= [] ins_expect = [] del_expect = [] # Tests bit = bitarray.bitarray(11030",
"[11005, 11022] del_expect = [11017, 11030] # Tests bit = bitarray.bitarray(11030 + 1)",
"True #end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def",
"assert big_dict['13_snv'][:11031] == bit # Check ins bit.setall(False) for i in ins_expect: bit[i]",
"snv_expect: bit[i] = True #end for assert big_dict['13_snv'][:11031] == bit # Check ins",
"for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def ################################################################# # Errors",
"Expected snv_expect = [11001, 11007, 11010] ins_expect = [11005, 11022] del_expect = [11017,",
"'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '17', 'ncores': None, 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile':",
"position 13:11006 in file tests/files/input_toBig_miss_pos.rck.gz is not consistent with other files\\n' #end def",
"'ncores': None, 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected",
"'2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect",
"= True #end for assert big_dict['13_ins'][:11031] == bit # Check del bit.setall(False) for",
"bit[i] = True #end for assert big_dict['13_snv'][:11031] == bit # Check ins bit.setall(False)",
"= load_big('tests/files/main_test.out') # Check snv bit.setall(False) for i in snv_expect: bit[i] = True",
"= [11022] del_expect = [11030] # Tests bit = bitarray.bitarray(11030 + 1) big_dict",
"for i in ins_expect: bit[i] = True #end for assert big_dict['13_ins'][:11031] == bit",
"None, 'ncores': '1', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) #",
"Expected snv_expect = [11007, 11010, 11013] ins_expect = [11022] del_expect = [11030] #",
"main_toBig ) from granite.lib.shared_functions import * ################################################################# # Tests ################################################################# def test_run_toBig_rdthr_2_all(): '''",
"'3', 'rdthr': None, 'ncores': '2', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run",
"'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [] ins_expect",
"['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '17', 'ncores': '1', 'abthr': None,",
"'3', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run",
"Clean os.remove('tests/files/main_test.out') #end def ################################################################# # Errors ################################################################# def test_run_toBig_rdthr_2_all_miss_pos(): ''' ''' #",
"big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_2(): ''' ''' #",
"'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007] ins_expect = [] del_expect",
"# Tests bit = bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out') # Check snv",
"# Run and Tests with pytest.raises(Exception) as e: assert main_toBig(args) assert str(e.value) ==",
"'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected",
"'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007] ins_expect = []",
"#end def def test_run_toBig_abthr_15_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',",
"11002, 11007, 11010, 11013, 11023] ins_expect = [11005, 11022] del_expect = [11017, 11030]",
"'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007, 11010, 11013] ins_expect",
"for i in del_expect: bit[i] = True #end for assert big_dict['13_del'][:11031] == bit",
"os import pytest import bitarray from granite.toBig import ( main as main_toBig )",
"'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': None, 'ncores': '1', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile':",
"'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11010] ins_expect = [] del_expect",
"# Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2_single(): ''' ''' # Variables args =",
"'2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) #",
"''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr':",
"[] del_expect = [] # Tests bit = bitarray.bitarray(11030 + 1) big_dict =",
"def def test_run_toBig_rdthr_17_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'],",
"snv_expect = [11007, 11010] ins_expect = [] del_expect = [] # Tests bit",
"for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2_single(): '''",
"args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '2', 'ncores':",
"'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'}",
"assert big_dict['13_ins'][:11031] == bit # Check del bit.setall(False) for i in del_expect: bit[i]",
"''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr':",
"'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': None, 'ncores': '1', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions',",
"['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None,",
"test_run_toBig_rdthr_2_all_miss_pos(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile':",
"'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run and Tests with pytest.raises(Exception) as",
"'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile':",
"in del_expect: bit[i] = True #end for assert big_dict['13_del'][:11031] == bit # Clean",
"import sys, os import pytest import bitarray from granite.toBig import ( main as",
"i in del_expect: bit[i] = True #end for assert big_dict['13_del'][:11031] == bit #",
"'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run and Tests with",
"'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile':",
"[11022] del_expect = [11030] # Tests bit = bitarray.bitarray(11030 + 1) big_dict =",
"# Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_2(): ''' ''' # Variables args =",
"'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11001, 11007, 11010] ins_expect",
"# Expected snv_expect = [11007, 11010] ins_expect = [] del_expect = [] #",
"bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_all(): ''' ''' # Variables args",
"'1', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect",
"'fithr': '4', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} #",
"def def test_run_toBig_rdthr_2_1_single(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out',",
"= {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '4', 'rdthr': '2', 'ncores':",
"for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_15_all(): '''",
"'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11010] ins_expect =",
"#end for assert big_dict['13_ins'][:11031] == bit # Check del bit.setall(False) for i in",
"== bit # Check del bit.setall(False) for i in del_expect: bit[i] = True",
"'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions',",
"Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2',",
"def def test_run_toBig_rdthr_17_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'],",
"os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_1_single(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'],",
"main as main_toBig ) from granite.lib.shared_functions import * ################################################################# # Tests ################################################################# def",
"#end def def test_run_toBig_rdthr_2_2_single(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile':",
"bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_all(): ''' ''' # Variables args",
"'rdthr': None, 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args)",
"from granite.lib.shared_functions import * ################################################################# # Tests ################################################################# def test_run_toBig_rdthr_2_all(): ''' ''' #",
"'2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run and Tests with pytest.raises(Exception)",
"'fithr': '3', 'rdthr': '17', 'ncores': None, 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} #",
"# Check snv bit.setall(False) for i in snv_expect: bit[i] = True #end for",
"snv bit.setall(False) for i in snv_expect: bit[i] = True #end for assert big_dict['13_snv'][:11031]",
"'fithr': '2', 'rdthr': None, 'ncores': '1', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} #",
"{'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr':",
"* ################################################################# # Tests ################################################################# def test_run_toBig_rdthr_2_all(): ''' ''' # Variables args =",
"'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect =",
"args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores':",
"[11010] ins_expect = [] del_expect = [] # Tests bit = bitarray.bitarray(11030 +",
"big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def ################################################################# # Errors ################################################################# def",
"bit = bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out') # Check snv bit.setall(False) for",
"bit # Check ins bit.setall(False) for i in ins_expect: bit[i] = True #end",
"os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz',",
"args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '17', 'ncores':",
"'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11001, 11002, 11007, 11010, 11013,",
"snv_expect = [11007, 11010, 11013] ins_expect = [11022] del_expect = [11030] # Tests",
"test_run_toBig_abthr_25_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',",
"main_toBig(args) # Expected snv_expect = [] ins_expect = [] del_expect = [] #",
"bit[i] = True #end for assert big_dict['13_ins'][:11031] == bit # Check del bit.setall(False)",
"#end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_2():",
"'2', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect",
"== bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2_single(): ''' ''' # Variables",
"main_toBig(args) # Expected snv_expect = [11001, 11002, 11007, 11010, 11013, 11023] ins_expect =",
"big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_all(): ''' ''' #",
"'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11007, 11010] ins_expect =",
"Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': None,",
"# Clean os.remove('tests/files/main_test.out') #end def ################################################################# # Errors ################################################################# def test_run_toBig_rdthr_2_all_miss_pos(): ''' '''",
"'ncores': '2', 'abthr': '25', 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected",
"'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run main_toBig(args) # Expected snv_expect = [11001, 11002, 11007,",
"['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': None,",
"11010, 11013] ins_expect = [11022] del_expect = [11030] # Tests bit = bitarray.bitarray(11030",
"################################################################# def test_run_toBig_rdthr_2_all_miss_pos(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz',",
"def def test_run_toBig_rdthr_2_2(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'],",
"for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_all(): '''",
"Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2(): ''' ''' # Variables args = {'file':",
"def test_run_toBig_rdthr_2_1_single(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr':",
"{'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '2', 'ncores': '2', 'abthr':",
"Variables args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '17',",
"{'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '4', 'rdthr': '2', 'ncores': '2',",
"os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_all(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz',",
"snv_expect = [11001, 11007, 11010] ins_expect = [11005, 11022] del_expect = [11017, 11030]",
"Check ins bit.setall(False) for i in ins_expect: bit[i] = True #end for assert",
"# Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_17_all(): ''' ''' # Variables args =",
"snv_expect = [] ins_expect = [] del_expect = [] # Tests bit =",
"ins_expect = [11022] del_expect = [11030] # Tests bit = bitarray.bitarray(11030 + 1)",
"#end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def ################################################################# #",
"['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '1', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions',",
"{'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile':",
"bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_abthr_25_2(): ''' ''' # Variables args",
"== bit # Check ins bit.setall(False) for i in ins_expect: bit[i] = True",
"Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '1', 'rdthr': '2', 'ncores': '2',",
"Run main_toBig(args) # Expected snv_expect = [11007] ins_expect = [] del_expect = []",
"ins_expect = [11005, 11022] del_expect = [11017, 11030] # Tests bit = bitarray.bitarray(11030",
"['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions',",
"11030] # Tests bit = bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out') # Check",
"11007, 11010, 11013, 11023] ins_expect = [11005, 11022] del_expect = [11017, 11030] #",
"args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '1', 'rdthr': '2', 'ncores': '2', 'abthr':",
"'1', 'rdthr': '2', 'ncores': '2', 'abthr': None, 'regionfile': 'tests/files/input_toBig.regions', 'chromfile': 'tests/files/input_toBig.chrom.size'} # Run",
"= {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz', 'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out', 'fithr': '3', 'rdthr': '2', 'ncores': '2',",
"del_expect = [11030] # Tests bit = bitarray.bitarray(11030 + 1) big_dict = load_big('tests/files/main_test.out')",
"# Check ins bit.setall(False) for i in ins_expect: bit[i] = True #end for",
"Run main_toBig(args) # Expected snv_expect = [11007, 11010, 11013] ins_expect = [11022] del_expect",
"def def test_run_toBig_rdthr_2_2_single(): ''' ''' # Variables args = {'file': ['tests/files/input_toBig_1.rck.gz'], 'outputfile': 'tests/files/main_test.out',",
"= [11007] ins_expect = [] del_expect = [] # Tests bit = bitarray.bitarray(11030",
"#end for assert big_dict['13_del'][:11031] == bit # Clean os.remove('tests/files/main_test.out') #end def def test_run_toBig_rdthr_2_2():",
"main_toBig(args) # Expected snv_expect = [11010] ins_expect = [] del_expect = [] #",
"# Expected snv_expect = [11010] ins_expect = [] del_expect = [] # Tests",
"i in snv_expect: bit[i] = True #end for assert big_dict['13_snv'][:11031] == bit #",
"snv_expect = [11007] ins_expect = [] del_expect = [] # Tests bit =",
"def ################################################################# # Errors ################################################################# def test_run_toBig_rdthr_2_all_miss_pos(): ''' ''' # Variables args ="
] |
[] |
[
"driver.find_elements_by_class_name(\"nick\") counts = driver.find_elements_by_class_name(\"js-num\") for name, count in zip(names,counts): print(name.text,\":\",count.text) if driver.page_source.find(\"laypage_next\") !=",
"driver = webdriver.Chrome() driver.get(\"https://www.huya.com/g/lol\") while True: names = driver.find_elements_by_class_name(\"nick\") counts = driver.find_elements_by_class_name(\"js-num\") for",
"name, count in zip(names,counts): print(name.text,\":\",count.text) if driver.page_source.find(\"laypage_next\") != -1: driver.find_element_by_class_name(\"laypage_next\").click() sleep(3) else: break",
"driver.find_elements_by_class_name(\"js-num\") for name, count in zip(names,counts): print(name.text,\":\",count.text) if driver.page_source.find(\"laypage_next\") != -1: driver.find_element_by_class_name(\"laypage_next\").click() sleep(3)",
"sleep driver = webdriver.Chrome() driver.get(\"https://www.huya.com/g/lol\") while True: names = driver.find_elements_by_class_name(\"nick\") counts = driver.find_elements_by_class_name(\"js-num\")",
"from selenium import webdriver from time import sleep driver = webdriver.Chrome() driver.get(\"https://www.huya.com/g/lol\") while",
"counts = driver.find_elements_by_class_name(\"js-num\") for name, count in zip(names,counts): print(name.text,\":\",count.text) if driver.page_source.find(\"laypage_next\") != -1:",
"import sleep driver = webdriver.Chrome() driver.get(\"https://www.huya.com/g/lol\") while True: names = driver.find_elements_by_class_name(\"nick\") counts =",
"= driver.find_elements_by_class_name(\"js-num\") for name, count in zip(names,counts): print(name.text,\":\",count.text) if driver.page_source.find(\"laypage_next\") != -1: driver.find_element_by_class_name(\"laypage_next\").click()",
"= driver.find_elements_by_class_name(\"nick\") counts = driver.find_elements_by_class_name(\"js-num\") for name, count in zip(names,counts): print(name.text,\":\",count.text) if driver.page_source.find(\"laypage_next\")",
"count in zip(names,counts): print(name.text,\":\",count.text) if driver.page_source.find(\"laypage_next\") != -1: driver.find_element_by_class_name(\"laypage_next\").click() sleep(3) else: break driver.quit()",
"for name, count in zip(names,counts): print(name.text,\":\",count.text) if driver.page_source.find(\"laypage_next\") != -1: driver.find_element_by_class_name(\"laypage_next\").click() sleep(3) else:",
"time import sleep driver = webdriver.Chrome() driver.get(\"https://www.huya.com/g/lol\") while True: names = driver.find_elements_by_class_name(\"nick\") counts",
"driver.get(\"https://www.huya.com/g/lol\") while True: names = driver.find_elements_by_class_name(\"nick\") counts = driver.find_elements_by_class_name(\"js-num\") for name, count in",
"import webdriver from time import sleep driver = webdriver.Chrome() driver.get(\"https://www.huya.com/g/lol\") while True: names",
"from time import sleep driver = webdriver.Chrome() driver.get(\"https://www.huya.com/g/lol\") while True: names = driver.find_elements_by_class_name(\"nick\")",
"webdriver.Chrome() driver.get(\"https://www.huya.com/g/lol\") while True: names = driver.find_elements_by_class_name(\"nick\") counts = driver.find_elements_by_class_name(\"js-num\") for name, count",
"while True: names = driver.find_elements_by_class_name(\"nick\") counts = driver.find_elements_by_class_name(\"js-num\") for name, count in zip(names,counts):",
"selenium import webdriver from time import sleep driver = webdriver.Chrome() driver.get(\"https://www.huya.com/g/lol\") while True:",
"<reponame>littleturings/2021PythonWebCrawler from selenium import webdriver from time import sleep driver = webdriver.Chrome() driver.get(\"https://www.huya.com/g/lol\")",
"names = driver.find_elements_by_class_name(\"nick\") counts = driver.find_elements_by_class_name(\"js-num\") for name, count in zip(names,counts): print(name.text,\":\",count.text) if",
"True: names = driver.find_elements_by_class_name(\"nick\") counts = driver.find_elements_by_class_name(\"js-num\") for name, count in zip(names,counts): print(name.text,\":\",count.text)",
"webdriver from time import sleep driver = webdriver.Chrome() driver.get(\"https://www.huya.com/g/lol\") while True: names =",
"= webdriver.Chrome() driver.get(\"https://www.huya.com/g/lol\") while True: names = driver.find_elements_by_class_name(\"nick\") counts = driver.find_elements_by_class_name(\"js-num\") for name,"
] |
[
"django.conf.urls import url from django.views.generic import ListView, DetailView from models import Notas from",
"DetailView from models import Notas from .views import * urlpatterns = [ url(r'^$',",
"url(r'^$', 'lista_notas', name=\"notas_list\"), url(r'^pais/(?P<id>\\d+)/$', lista_notas_pais, name=\"notas_list_pais\"), url(r'^coparte/(?P<id>\\d+)/$', lista_notas_copartes, name=\"notas_list_copartes\"), # url(r'^ver/(?P<id>\\d+)/$', 'comentar_nota', name='comentar-nota'),",
"url(r'^ver/(?P<id>\\d+)/$', 'comentar_nota', name='comentar-nota'), url(r'^(?P<id>\\d+)/$', nota_detail, name='notas-detail'), url(r'^crear/$', crear_nota, name=\"crear-nota\"), # url(r'^editar/(?P<id>\\d+)/$', 'editar_nota', name='editar-nota'),",
"name=\"notas_list_pais\"), url(r'^coparte/(?P<id>\\d+)/$', lista_notas_copartes, name=\"notas_list_copartes\"), # url(r'^ver/(?P<id>\\d+)/$', 'comentar_nota', name='comentar-nota'), url(r'^(?P<id>\\d+)/$', nota_detail, name='notas-detail'), url(r'^crear/$', crear_nota,",
"name=\"notas_list_copartes\"), # url(r'^ver/(?P<id>\\d+)/$', 'comentar_nota', name='comentar-nota'), url(r'^(?P<id>\\d+)/$', nota_detail, name='notas-detail'), url(r'^crear/$', crear_nota, name=\"crear-nota\"), # url(r'^editar/(?P<id>\\d+)/$',",
"from django.views.generic import ListView, DetailView from models import Notas from .views import *",
"from django.conf.urls import url from django.views.generic import ListView, DetailView from models import Notas",
"django.views.generic import ListView, DetailView from models import Notas from .views import * urlpatterns",
"import ListView, DetailView from models import Notas from .views import * urlpatterns =",
"models import Notas from .views import * urlpatterns = [ url(r'^$', list_notas,name='notas_list'), #",
"'comentar_nota', name='comentar-nota'), url(r'^(?P<id>\\d+)/$', nota_detail, name='notas-detail'), url(r'^crear/$', crear_nota, name=\"crear-nota\"), # url(r'^editar/(?P<id>\\d+)/$', 'editar_nota', name='editar-nota'), #",
"nota_detail, name='notas-detail'), url(r'^crear/$', crear_nota, name=\"crear-nota\"), # url(r'^editar/(?P<id>\\d+)/$', 'editar_nota', name='editar-nota'), # url(r'^borrar/(?P<id>\\d+)/$', 'borrar_nota', name='borrar-nota'),",
".views import * urlpatterns = [ url(r'^$', list_notas,name='notas_list'), # url(r'^$', 'lista_notas', name=\"notas_list\"), url(r'^pais/(?P<id>\\d+)/$',",
"crear_nota, name=\"crear-nota\"), # url(r'^editar/(?P<id>\\d+)/$', 'editar_nota', name='editar-nota'), # url(r'^borrar/(?P<id>\\d+)/$', 'borrar_nota', name='borrar-nota'), url(r'^imagenes/$', ver_imagenes, name=\"imagenes-nota\"),",
"* urlpatterns = [ url(r'^$', list_notas,name='notas_list'), # url(r'^$', 'lista_notas', name=\"notas_list\"), url(r'^pais/(?P<id>\\d+)/$', lista_notas_pais, name=\"notas_list_pais\"),",
"url(r'^(?P<id>\\d+)/$', nota_detail, name='notas-detail'), url(r'^crear/$', crear_nota, name=\"crear-nota\"), # url(r'^editar/(?P<id>\\d+)/$', 'editar_nota', name='editar-nota'), # url(r'^borrar/(?P<id>\\d+)/$', 'borrar_nota',",
"'editar_nota', name='editar-nota'), # url(r'^borrar/(?P<id>\\d+)/$', 'borrar_nota', name='borrar-nota'), url(r'^imagenes/$', ver_imagenes, name=\"imagenes-nota\"), url(r'^videos/$', ver_videos, name=\"videos-nota\"), ]",
"Notas from .views import * urlpatterns = [ url(r'^$', list_notas,name='notas_list'), # url(r'^$', 'lista_notas',",
"lista_notas_copartes, name=\"notas_list_copartes\"), # url(r'^ver/(?P<id>\\d+)/$', 'comentar_nota', name='comentar-nota'), url(r'^(?P<id>\\d+)/$', nota_detail, name='notas-detail'), url(r'^crear/$', crear_nota, name=\"crear-nota\"), #",
"name=\"notas_list\"), url(r'^pais/(?P<id>\\d+)/$', lista_notas_pais, name=\"notas_list_pais\"), url(r'^coparte/(?P<id>\\d+)/$', lista_notas_copartes, name=\"notas_list_copartes\"), # url(r'^ver/(?P<id>\\d+)/$', 'comentar_nota', name='comentar-nota'), url(r'^(?P<id>\\d+)/$', nota_detail,",
"[ url(r'^$', list_notas,name='notas_list'), # url(r'^$', 'lista_notas', name=\"notas_list\"), url(r'^pais/(?P<id>\\d+)/$', lista_notas_pais, name=\"notas_list_pais\"), url(r'^coparte/(?P<id>\\d+)/$', lista_notas_copartes, name=\"notas_list_copartes\"),",
"import Notas from .views import * urlpatterns = [ url(r'^$', list_notas,name='notas_list'), # url(r'^$',",
"from models import Notas from .views import * urlpatterns = [ url(r'^$', list_notas,name='notas_list'),",
"# url(r'^ver/(?P<id>\\d+)/$', 'comentar_nota', name='comentar-nota'), url(r'^(?P<id>\\d+)/$', nota_detail, name='notas-detail'), url(r'^crear/$', crear_nota, name=\"crear-nota\"), # url(r'^editar/(?P<id>\\d+)/$', 'editar_nota',",
"ListView, DetailView from models import Notas from .views import * urlpatterns = [",
"lista_notas_pais, name=\"notas_list_pais\"), url(r'^coparte/(?P<id>\\d+)/$', lista_notas_copartes, name=\"notas_list_copartes\"), # url(r'^ver/(?P<id>\\d+)/$', 'comentar_nota', name='comentar-nota'), url(r'^(?P<id>\\d+)/$', nota_detail, name='notas-detail'), url(r'^crear/$',",
"list_notas,name='notas_list'), # url(r'^$', 'lista_notas', name=\"notas_list\"), url(r'^pais/(?P<id>\\d+)/$', lista_notas_pais, name=\"notas_list_pais\"), url(r'^coparte/(?P<id>\\d+)/$', lista_notas_copartes, name=\"notas_list_copartes\"), # url(r'^ver/(?P<id>\\d+)/$',",
"= [ url(r'^$', list_notas,name='notas_list'), # url(r'^$', 'lista_notas', name=\"notas_list\"), url(r'^pais/(?P<id>\\d+)/$', lista_notas_pais, name=\"notas_list_pais\"), url(r'^coparte/(?P<id>\\d+)/$', lista_notas_copartes,",
"name=\"crear-nota\"), # url(r'^editar/(?P<id>\\d+)/$', 'editar_nota', name='editar-nota'), # url(r'^borrar/(?P<id>\\d+)/$', 'borrar_nota', name='borrar-nota'), url(r'^imagenes/$', ver_imagenes, name=\"imagenes-nota\"), url(r'^videos/$',",
"url(r'^$', list_notas,name='notas_list'), # url(r'^$', 'lista_notas', name=\"notas_list\"), url(r'^pais/(?P<id>\\d+)/$', lista_notas_pais, name=\"notas_list_pais\"), url(r'^coparte/(?P<id>\\d+)/$', lista_notas_copartes, name=\"notas_list_copartes\"), #",
"name='notas-detail'), url(r'^crear/$', crear_nota, name=\"crear-nota\"), # url(r'^editar/(?P<id>\\d+)/$', 'editar_nota', name='editar-nota'), # url(r'^borrar/(?P<id>\\d+)/$', 'borrar_nota', name='borrar-nota'), url(r'^imagenes/$',",
"import * urlpatterns = [ url(r'^$', list_notas,name='notas_list'), # url(r'^$', 'lista_notas', name=\"notas_list\"), url(r'^pais/(?P<id>\\d+)/$', lista_notas_pais,",
"url from django.views.generic import ListView, DetailView from models import Notas from .views import",
"urlpatterns = [ url(r'^$', list_notas,name='notas_list'), # url(r'^$', 'lista_notas', name=\"notas_list\"), url(r'^pais/(?P<id>\\d+)/$', lista_notas_pais, name=\"notas_list_pais\"), url(r'^coparte/(?P<id>\\d+)/$',",
"# url(r'^$', 'lista_notas', name=\"notas_list\"), url(r'^pais/(?P<id>\\d+)/$', lista_notas_pais, name=\"notas_list_pais\"), url(r'^coparte/(?P<id>\\d+)/$', lista_notas_copartes, name=\"notas_list_copartes\"), # url(r'^ver/(?P<id>\\d+)/$', 'comentar_nota',",
"name='comentar-nota'), url(r'^(?P<id>\\d+)/$', nota_detail, name='notas-detail'), url(r'^crear/$', crear_nota, name=\"crear-nota\"), # url(r'^editar/(?P<id>\\d+)/$', 'editar_nota', name='editar-nota'), # url(r'^borrar/(?P<id>\\d+)/$',",
"url(r'^crear/$', crear_nota, name=\"crear-nota\"), # url(r'^editar/(?P<id>\\d+)/$', 'editar_nota', name='editar-nota'), # url(r'^borrar/(?P<id>\\d+)/$', 'borrar_nota', name='borrar-nota'), url(r'^imagenes/$', ver_imagenes,",
"url(r'^editar/(?P<id>\\d+)/$', 'editar_nota', name='editar-nota'), # url(r'^borrar/(?P<id>\\d+)/$', 'borrar_nota', name='borrar-nota'), url(r'^imagenes/$', ver_imagenes, name=\"imagenes-nota\"), url(r'^videos/$', ver_videos, name=\"videos-nota\"),",
"url(r'^pais/(?P<id>\\d+)/$', lista_notas_pais, name=\"notas_list_pais\"), url(r'^coparte/(?P<id>\\d+)/$', lista_notas_copartes, name=\"notas_list_copartes\"), # url(r'^ver/(?P<id>\\d+)/$', 'comentar_nota', name='comentar-nota'), url(r'^(?P<id>\\d+)/$', nota_detail, name='notas-detail'),",
"url(r'^coparte/(?P<id>\\d+)/$', lista_notas_copartes, name=\"notas_list_copartes\"), # url(r'^ver/(?P<id>\\d+)/$', 'comentar_nota', name='comentar-nota'), url(r'^(?P<id>\\d+)/$', nota_detail, name='notas-detail'), url(r'^crear/$', crear_nota, name=\"crear-nota\"),",
"# url(r'^editar/(?P<id>\\d+)/$', 'editar_nota', name='editar-nota'), # url(r'^borrar/(?P<id>\\d+)/$', 'borrar_nota', name='borrar-nota'), url(r'^imagenes/$', ver_imagenes, name=\"imagenes-nota\"), url(r'^videos/$', ver_videos,",
"import url from django.views.generic import ListView, DetailView from models import Notas from .views",
"'lista_notas', name=\"notas_list\"), url(r'^pais/(?P<id>\\d+)/$', lista_notas_pais, name=\"notas_list_pais\"), url(r'^coparte/(?P<id>\\d+)/$', lista_notas_copartes, name=\"notas_list_copartes\"), # url(r'^ver/(?P<id>\\d+)/$', 'comentar_nota', name='comentar-nota'), url(r'^(?P<id>\\d+)/$',",
"from .views import * urlpatterns = [ url(r'^$', list_notas,name='notas_list'), # url(r'^$', 'lista_notas', name=\"notas_list\"),"
] |
[
"the # pipe you want to use. Make sure the pipe you want",
"pass # DirectX 8 #try: # import libpandadx8 #except: # pass # OpenGL",
"8 #try: # import libpandadx8 #except: # pass # OpenGL try: import libpandagl",
"pipes to load with my # hacked up Panda3D. If you want to",
"get the graphics pipes to load with my # hacked up Panda3D. If",
"a hack fix to get the graphics pipes to load with my #",
"sure the pipe you want to load # first is imported first. #",
"# pipe you want to use. Make sure the pipe you want to",
"fix to get the graphics pipes to load with my # hacked up",
"hack fix to get the graphics pipes to load with my # hacked",
"load # first is imported first. # DirectX 9 #try: # import libpandadx9",
"graphics pipes to load with my # hacked up Panda3D. If you want",
"load the level editor # using DirectX 8 or DirectX 9, uncomment the",
"DirectX 8 #try: # import libpandadx8 #except: # pass # OpenGL try: import",
"the import for the # pipe you want to use. Make sure the",
"you want to use. Make sure the pipe you want to load #",
"# pass # OpenGL try: import libpandagl except: pass print 'DirectStart: Starting the",
"pass # OpenGL try: import libpandagl except: pass print 'DirectStart: Starting the game.'",
"import libpandadx8 #except: # pass # OpenGL try: import libpandagl except: pass print",
"up Panda3D. If you want to load the level editor # using DirectX",
"want to use. Make sure the pipe you want to load # first",
"my # hacked up Panda3D. If you want to load the level editor",
"Panda3D. If you want to load the level editor # using DirectX 8",
"# OpenGL try: import libpandagl except: pass print 'DirectStart: Starting the game.' from",
"to load # first is imported first. # DirectX 9 #try: # import",
"#try: # import libpandadx8 #except: # pass # OpenGL try: import libpandagl except:",
"DirectX 8 or DirectX 9, uncomment the import for the # pipe you",
"you want to load # first is imported first. # DirectX 9 #try:",
"# using DirectX 8 or DirectX 9, uncomment the import for the #",
"8 or DirectX 9, uncomment the import for the # pipe you want",
"pipe you want to load # first is imported first. # DirectX 9",
"DirectX 9, uncomment the import for the # pipe you want to use.",
"or DirectX 9, uncomment the import for the # pipe you want to",
"libpandadx9 #except: # pass # DirectX 8 #try: # import libpandadx8 #except: #",
"uncomment the import for the # pipe you want to use. Make sure",
"the pipe you want to load # first is imported first. # DirectX",
"libpandadx8 #except: # pass # OpenGL try: import libpandagl except: pass print 'DirectStart:",
"using DirectX 8 or DirectX 9, uncomment the import for the # pipe",
"#try: # import libpandadx9 #except: # pass # DirectX 8 #try: # import",
"# first is imported first. # DirectX 9 #try: # import libpandadx9 #except:",
"# This is a hack fix to get the graphics pipes to load",
"first is imported first. # DirectX 9 #try: # import libpandadx9 #except: #",
"#except: # pass # OpenGL try: import libpandagl except: pass print 'DirectStart: Starting",
"import for the # pipe you want to use. Make sure the pipe",
"OpenGL try: import libpandagl except: pass print 'DirectStart: Starting the game.' from direct.showbase",
"import libpandadx9 #except: # pass # DirectX 8 #try: # import libpandadx8 #except:",
"load with my # hacked up Panda3D. If you want to load the",
"# DirectX 9 #try: # import libpandadx9 #except: # pass # DirectX 8",
"first. # DirectX 9 #try: # import libpandadx9 #except: # pass # DirectX",
"for the # pipe you want to use. Make sure the pipe you",
"This is a hack fix to get the graphics pipes to load with",
"to get the graphics pipes to load with my # hacked up Panda3D.",
"to load the level editor # using DirectX 8 or DirectX 9, uncomment",
"want to load the level editor # using DirectX 8 or DirectX 9,",
"pass print 'DirectStart: Starting the game.' from direct.showbase import ShowBase base = ShowBase.ShowBase()",
"level editor # using DirectX 8 or DirectX 9, uncomment the import for",
"# DirectX 8 #try: # import libpandadx8 #except: # pass # OpenGL try:",
"except: pass print 'DirectStart: Starting the game.' from direct.showbase import ShowBase base =",
"is a hack fix to get the graphics pipes to load with my",
"use. Make sure the pipe you want to load # first is imported",
"libpandagl except: pass print 'DirectStart: Starting the game.' from direct.showbase import ShowBase base",
"import libpandagl except: pass print 'DirectStart: Starting the game.' from direct.showbase import ShowBase",
"DirectX 9 #try: # import libpandadx9 #except: # pass # DirectX 8 #try:",
"# import libpandadx9 #except: # pass # DirectX 8 #try: # import libpandadx8",
"# import libpandadx8 #except: # pass # OpenGL try: import libpandagl except: pass",
"imported first. # DirectX 9 #try: # import libpandadx9 #except: # pass #",
"to load with my # hacked up Panda3D. If you want to load",
"try: import libpandagl except: pass print 'DirectStart: Starting the game.' from direct.showbase import",
"pipe you want to use. Make sure the pipe you want to load",
"the graphics pipes to load with my # hacked up Panda3D. If you",
"# pass # DirectX 8 #try: # import libpandadx8 #except: # pass #",
"If you want to load the level editor # using DirectX 8 or",
"#except: # pass # DirectX 8 #try: # import libpandadx8 #except: # pass",
"# hacked up Panda3D. If you want to load the level editor #",
"editor # using DirectX 8 or DirectX 9, uncomment the import for the",
"the level editor # using DirectX 8 or DirectX 9, uncomment the import",
"you want to load the level editor # using DirectX 8 or DirectX",
"hacked up Panda3D. If you want to load the level editor # using",
"Make sure the pipe you want to load # first is imported first.",
"with my # hacked up Panda3D. If you want to load the level",
"to use. Make sure the pipe you want to load # first is",
"<gh_stars>1-10 # This is a hack fix to get the graphics pipes to",
"9, uncomment the import for the # pipe you want to use. Make",
"want to load # first is imported first. # DirectX 9 #try: #",
"is imported first. # DirectX 9 #try: # import libpandadx9 #except: # pass",
"9 #try: # import libpandadx9 #except: # pass # DirectX 8 #try: #"
] |
[
": ')) except Exception as e: print('Harus pakek angka ya') else: print(f'Halo {get_nama.capitalize()}!')",
"')) except Exception as e: print('Harus pakek angka ya') else: print(f'Halo {get_nama.capitalize()}!') print(f'Umur",
"input('Masukan nama : ') try: get_umur = int(input('Masukan umur : ')) except Exception",
"= input('Masukan nama : ') try: get_umur = int(input('Masukan umur : ')) except",
"get_umur = int(input('Masukan umur : ')) except Exception as e: print('Harus pakek angka",
"') try: get_umur = int(input('Masukan umur : ')) except Exception as e: print('Harus",
"nama : ') try: get_umur = int(input('Masukan umur : ')) except Exception as",
"int(input('Masukan umur : ')) except Exception as e: print('Harus pakek angka ya') else:",
"try: get_umur = int(input('Masukan umur : ')) except Exception as e: print('Harus pakek",
": ') try: get_umur = int(input('Masukan umur : ')) except Exception as e:",
"= int(input('Masukan umur : ')) except Exception as e: print('Harus pakek angka ya')",
"umur : ')) except Exception as e: print('Harus pakek angka ya') else: print(f'Halo",
"except Exception as e: print('Harus pakek angka ya') else: print(f'Halo {get_nama.capitalize()}!') print(f'Umur kamu",
"get_nama = input('Masukan nama : ') try: get_umur = int(input('Masukan umur : '))",
"Exception as e: print('Harus pakek angka ya') else: print(f'Halo {get_nama.capitalize()}!') print(f'Umur kamu {str(get_umur)}')"
] |
[
"= np.zeros([len(y_list), hp.maxlen], np.int32) for i, (x, y) in enumerate(zip(x_list, y_list)): X[i] =",
"= [], [], [], [] for source_sent, target_sent in zip(source_sents, target_sents): x =",
"_preds[:, j] #print(pred) # pred should be length 1 each time due to",
"as np from hyperparams import Hyperparams as hp from data_load import load_test_data, load_de_vocab,",
"<=hp.maxlen: x_list.append(np.array(x)) y_list.append(np.array(y)) Sources.append(source_sent) Targets.append(target_sent) # Pad X = np.zeros([len(x_list), hp.maxlen], np.int32) Y",
"-*- coding: utf-8 -*- #/usr/bin/python2 ''' June 2017 by <NAME>. <EMAIL>. https://www.github.com/kyubyong/transformer '''",
"np.zeros([len(x_list), hp.maxlen], np.int32) Y = np.zeros([len(y_list), hp.maxlen], np.int32) for i, (x, y) in",
"### Get mini-batches x = X[i*hp.batch_size: (i+1)*hp.batch_size] prompt = raw_input() ### Autoregressive inference",
"prompt = raw_input() ### Autoregressive inference preds = np.zeros((hp.batch_size, hp.maxlen), np.int32) for j",
"#print(pred) # pred should be length 1 each time due to the cycling",
"[], [] for source_sent, target_sent in zip(source_sents, target_sents): x = [de2idx.get(word, 1) for",
"enumerate(xlist): X[i] = np.lib.pad(x, [0, hp.maxlen - len(x)], 'constant', constant_values=(0, 0)) list_of_refs, hypotheses",
"(i+1)*hp.batch_size] prompt = raw_input() ### Autoregressive inference preds = np.zeros((hp.batch_size, hp.maxlen), np.int32) for",
"import Graph #from nltk.translate.bleu_score import corpus_bleu def eval(): # Load graph g =",
"hypotheses = [], [] for i in range(len(X) // hp.batch_size): ### Get mini-batches",
"tensorflow as tf import numpy as np from hyperparams import Hyperparams as hp",
"of the while loop in main for pred in preds: got = \"",
"# 1: OOV, </S>: End of Text y = [en2idx.get(word, 1) for word",
"with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: ## Restore parameters sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)) while(True): prompt = raw_input()",
"### Autoregressive inference preds = np.zeros((hp.batch_size, hp.maxlen), np.int32) for j in range(hp.maxlen): #print(\"j:",
"corpus_bleu def eval(): # Load graph g = Graph(is_training=False) print(\"Graph loaded\") # Load",
"hp from data_load import load_test_data, load_de_vocab, load_en_vocab from train import Graph #from nltk.translate.bleu_score",
"'constant', constant_values=(0, 0)) list_of_refs, hypotheses = [], [] for i in range(len(X) //",
"y = [en2idx.get(word, 1) for word in (target_sent + u\" </S>\").split()] if max(len(x),",
"for pred in preds: got = \" \".join(idx2en[idx] for idx in pred).split(\"</S>\")[0].strip() #return",
"time due to the cycling of the while loop in main for pred",
"for word in (source_sent + u\" </S>\").split()] # 1: OOV, </S>: End of",
"hp.maxlen], np.int32) for i, xi in enumerate(xlist): X[i] = np.lib.pad(x, [0, hp.maxlen -",
"preds[:, j] = _preds[:, j] #print(pred) # pred should be length 1 each",
"= \" \".join(idx2en[idx] for idx in pred).split(\"</S>\")[0].strip() #return got print(got) if __name__ ==",
"as sess: ## Restore parameters sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)) while(True): prompt = raw_input() xlist =",
"= X[i*hp.batch_size: (i+1)*hp.batch_size] prompt = raw_input() ### Autoregressive inference preds = np.zeros((hp.batch_size, hp.maxlen),",
"[] xval = [en2idx.get(word, 1) for word in (target_sent + u\" </S>\").split()] if",
"len(y)) <=hp.maxlen: x_list.append(np.array(x)) y_list.append(np.array(y)) Sources.append(source_sent) Targets.append(target_sent) # Pad X = np.zeros([len(x_list), hp.maxlen], np.int32)",
"= load_test_data() \"\"\" x_list, y_list, Sources, Targets = [], [], [], [] for",
"https://www.github.com/kyubyong/transformer ''' from __future__ import print_function import codecs import os import tensorflow as",
"zip(source_sents, target_sents): x = [de2idx.get(word, 1) for word in (source_sent + u\" </S>\").split()]",
"Sources, Targets = load_test_data() \"\"\" x_list, y_list, Sources, Targets = [], [], [],",
"Sources.append(source_sent) Targets.append(target_sent) # Pad X = np.zeros([len(x_list), hp.maxlen], np.int32) Y = np.zeros([len(y_list), hp.maxlen],",
"np.int32) for i, (x, y) in enumerate(zip(x_list, y_list)): X[i] = np.lib.pad(x, [0, hp.maxlen-len(x)],",
"= np.zeros([len(xlist), hp.maxlen], np.int32) for i, xi in enumerate(xlist): X[i] = np.lib.pad(x, [0,",
"[0, hp.maxlen - len(x)], 'constant', constant_values=(0, 0)) list_of_refs, hypotheses = [], [] for",
"as tf import numpy as np from hyperparams import Hyperparams as hp from",
"import Hyperparams as hp from data_load import load_test_data, load_de_vocab, load_en_vocab from train import",
"y_list.append(np.array(y)) Sources.append(source_sent) Targets.append(target_sent) # Pad X = np.zeros([len(x_list), hp.maxlen], np.int32) Y = np.zeros([len(y_list),",
"sv = tf.train.Supervisor() with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: ## Restore parameters sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)) while(True):",
"[], [] for i in range(len(X) // hp.batch_size): ### Get mini-batches x =",
"for j in range(hp.maxlen): #print(\"j: \" + str(j)) _preds = sess.run(g.preds, {g.x: x,",
"the while loop in main for pred in preds: got = \" \".join(idx2en[idx]",
"print_function import codecs import os import tensorflow as tf import numpy as np",
"1) for word in (source_sent + u\" </S>\").split()] # 1: OOV, </S>: End",
"import numpy as np from hyperparams import Hyperparams as hp from data_load import",
"coding: utf-8 -*- #/usr/bin/python2 ''' June 2017 by <NAME>. <EMAIL>. https://www.github.com/kyubyong/transformer ''' from",
"hp.maxlen-len(x)], 'constant', constant_values=(0, 0)) \"\"\" en2idx, idx2en = load_en_vocab() # Start session with",
"2017 by <NAME>. <EMAIL>. https://www.github.com/kyubyong/transformer ''' from __future__ import print_function import codecs import",
"\" \".join(idx2en[idx] for idx in pred).split(\"</S>\")[0].strip() #return got print(got) if __name__ == '__main__':",
"os import tensorflow as tf import numpy as np from hyperparams import Hyperparams",
"utf-8 -*- #/usr/bin/python2 ''' June 2017 by <NAME>. <EMAIL>. https://www.github.com/kyubyong/transformer ''' from __future__",
"X, Sources, Targets = load_test_data() \"\"\" x_list, y_list, Sources, Targets = [], [],",
"x = X[i*hp.batch_size: (i+1)*hp.batch_size] prompt = raw_input() ### Autoregressive inference preds = np.zeros((hp.batch_size,",
"for word in (target_sent + u\" </S>\").split()] if (len(xval) <= hp.maxlen): xlist.append(np.array(xval)) X",
"x_list, y_list, Sources, Targets = [], [], [], [] for source_sent, target_sent in",
"for i, (x, y) in enumerate(zip(x_list, y_list)): X[i] = np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant',",
"\".join(idx2en[idx] for idx in pred).split(\"</S>\")[0].strip() #return got print(got) if __name__ == '__main__': eval()",
"__future__ import print_function import codecs import os import tensorflow as tf import numpy",
"for source_sent, target_sent in zip(source_sents, target_sents): x = [de2idx.get(word, 1) for word in",
"Targets.append(target_sent) # Pad X = np.zeros([len(x_list), hp.maxlen], np.int32) Y = np.zeros([len(y_list), hp.maxlen], np.int32)",
"i in range(len(X) // hp.batch_size): ### Get mini-batches x = X[i*hp.batch_size: (i+1)*hp.batch_size] prompt",
"hp.batch_size): ### Get mini-batches x = X[i*hp.batch_size: (i+1)*hp.batch_size] prompt = raw_input() ### Autoregressive",
"# pred should be length 1 each time due to the cycling of",
"# -*- coding: utf-8 -*- #/usr/bin/python2 ''' June 2017 by <NAME>. <EMAIL>. https://www.github.com/kyubyong/transformer",
"constant_values=(0, 0)) list_of_refs, hypotheses = [], [] for i in range(len(X) // hp.batch_size):",
"[en2idx.get(word, 1) for word in (target_sent + u\" </S>\").split()] if max(len(x), len(y)) <=hp.maxlen:",
"preds: got = \" \".join(idx2en[idx] for idx in pred).split(\"</S>\")[0].strip() #return got print(got) if",
"word in (target_sent + u\" </S>\").split()] if (len(xval) <= hp.maxlen): xlist.append(np.array(xval)) X =",
"<EMAIL>. https://www.github.com/kyubyong/transformer ''' from __future__ import print_function import codecs import os import tensorflow",
"Get mini-batches x = X[i*hp.batch_size: (i+1)*hp.batch_size] prompt = raw_input() ### Autoregressive inference preds",
"sess.run(g.preds, {g.x: x, g.y: preds}) preds[:, j] = _preds[:, j] #print(pred) # pred",
"np.zeros([len(xlist), hp.maxlen], np.int32) for i, xi in enumerate(xlist): X[i] = np.lib.pad(x, [0, hp.maxlen",
"by <NAME>. <EMAIL>. https://www.github.com/kyubyong/transformer ''' from __future__ import print_function import codecs import os",
"from hyperparams import Hyperparams as hp from data_load import load_test_data, load_de_vocab, load_en_vocab from",
"import os import tensorflow as tf import numpy as np from hyperparams import",
"= raw_input() xlist = [] xval = [en2idx.get(word, 1) for word in (target_sent",
"loaded\") # Load data # X, Sources, Targets = load_test_data() \"\"\" x_list, y_list,",
"in range(len(X) // hp.batch_size): ### Get mini-batches x = X[i*hp.batch_size: (i+1)*hp.batch_size] prompt =",
"np.int32) for j in range(hp.maxlen): #print(\"j: \" + str(j)) _preds = sess.run(g.preds, {g.x:",
"-*- #/usr/bin/python2 ''' June 2017 by <NAME>. <EMAIL>. https://www.github.com/kyubyong/transformer ''' from __future__ import",
"np.zeros([len(y_list), hp.maxlen], np.int32) for i, (x, y) in enumerate(zip(x_list, y_list)): X[i] = np.lib.pad(x,",
"load_de_vocab, load_en_vocab from train import Graph #from nltk.translate.bleu_score import corpus_bleu def eval(): #",
"def eval(): # Load graph g = Graph(is_training=False) print(\"Graph loaded\") # Load data",
"load_test_data, load_de_vocab, load_en_vocab from train import Graph #from nltk.translate.bleu_score import corpus_bleu def eval():",
"# Load data # X, Sources, Targets = load_test_data() \"\"\" x_list, y_list, Sources,",
"parameters sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)) while(True): prompt = raw_input() xlist = [] xval = [en2idx.get(word,",
"while(True): prompt = raw_input() xlist = [] xval = [en2idx.get(word, 1) for word",
"xlist = [] xval = [en2idx.get(word, 1) for word in (target_sent + u\"",
"\"\"\" en2idx, idx2en = load_en_vocab() # Start session with g.graph.as_default(): sv = tf.train.Supervisor()",
"import tensorflow as tf import numpy as np from hyperparams import Hyperparams as",
"str(j)) _preds = sess.run(g.preds, {g.x: x, g.y: preds}) preds[:, j] = _preds[:, j]",
"train import Graph #from nltk.translate.bleu_score import corpus_bleu def eval(): # Load graph g",
"constant_values=(0, 0)) \"\"\" en2idx, idx2en = load_en_vocab() # Start session with g.graph.as_default(): sv",
"x = [de2idx.get(word, 1) for word in (source_sent + u\" </S>\").split()] # 1:",
"from data_load import load_test_data, load_de_vocab, load_en_vocab from train import Graph #from nltk.translate.bleu_score import",
"sess: ## Restore parameters sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)) while(True): prompt = raw_input() xlist = []",
"g.graph.as_default(): sv = tf.train.Supervisor() with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: ## Restore parameters sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir))",
"hp.maxlen - len(x)], 'constant', constant_values=(0, 0)) list_of_refs, hypotheses = [], [] for i",
"import print_function import codecs import os import tensorflow as tf import numpy as",
"print(\"Graph loaded\") # Load data # X, Sources, Targets = load_test_data() \"\"\" x_list,",
"# Start session with g.graph.as_default(): sv = tf.train.Supervisor() with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: ##",
"target_sents): x = [de2idx.get(word, 1) for word in (source_sent + u\" </S>\").split()] #",
"[0, hp.maxlen-len(x)], 'constant', constant_values=(0, 0)) \"\"\" en2idx, idx2en = load_en_vocab() # Start session",
"''' from __future__ import print_function import codecs import os import tensorflow as tf",
"raw_input() ### Autoregressive inference preds = np.zeros((hp.batch_size, hp.maxlen), np.int32) for j in range(hp.maxlen):",
"u\" </S>\").split()] # 1: OOV, </S>: End of Text y = [en2idx.get(word, 1)",
"Load graph g = Graph(is_training=False) print(\"Graph loaded\") # Load data # X, Sources,",
"</S>\").split()] if (len(xval) <= hp.maxlen): xlist.append(np.array(xval)) X = np.zeros([len(xlist), hp.maxlen], np.int32) for i,",
"preds = np.zeros((hp.batch_size, hp.maxlen), np.int32) for j in range(hp.maxlen): #print(\"j: \" + str(j))",
"while loop in main for pred in preds: got = \" \".join(idx2en[idx] for",
"loop in main for pred in preds: got = \" \".join(idx2en[idx] for idx",
"en2idx, idx2en = load_en_vocab() # Start session with g.graph.as_default(): sv = tf.train.Supervisor() with",
"in preds: got = \" \".join(idx2en[idx] for idx in pred).split(\"</S>\")[0].strip() #return got print(got)",
"of Text y = [en2idx.get(word, 1) for word in (target_sent + u\" </S>\").split()]",
"\" + str(j)) _preds = sess.run(g.preds, {g.x: x, g.y: preds}) preds[:, j] =",
"np.int32) for i, xi in enumerate(xlist): X[i] = np.lib.pad(x, [0, hp.maxlen - len(x)],",
"#print(\"j: \" + str(j)) _preds = sess.run(g.preds, {g.x: x, g.y: preds}) preds[:, j]",
"Pad X = np.zeros([len(x_list), hp.maxlen], np.int32) Y = np.zeros([len(y_list), hp.maxlen], np.int32) for i,",
"x_list.append(np.array(x)) y_list.append(np.array(y)) Sources.append(source_sent) Targets.append(target_sent) # Pad X = np.zeros([len(x_list), hp.maxlen], np.int32) Y =",
"Sources, Targets = [], [], [], [] for source_sent, target_sent in zip(source_sents, target_sents):",
"= raw_input() ### Autoregressive inference preds = np.zeros((hp.batch_size, hp.maxlen), np.int32) for j in",
"# Load graph g = Graph(is_training=False) print(\"Graph loaded\") # Load data # X,",
"X[i] = np.lib.pad(x, [0, hp.maxlen - len(x)], 'constant', constant_values=(0, 0)) list_of_refs, hypotheses =",
"for i in range(len(X) // hp.batch_size): ### Get mini-batches x = X[i*hp.batch_size: (i+1)*hp.batch_size]",
"np.zeros((hp.batch_size, hp.maxlen), np.int32) for j in range(hp.maxlen): #print(\"j: \" + str(j)) _preds =",
"= sess.run(g.preds, {g.x: x, g.y: preds}) preds[:, j] = _preds[:, j] #print(pred) #",
"tf.train.latest_checkpoint(hp.logdir)) while(True): prompt = raw_input() xlist = [] xval = [en2idx.get(word, 1) for",
"cycling of the while loop in main for pred in preds: got =",
"#from nltk.translate.bleu_score import corpus_bleu def eval(): # Load graph g = Graph(is_training=False) print(\"Graph",
"pred in preds: got = \" \".join(idx2en[idx] for idx in pred).split(\"</S>\")[0].strip() #return got",
"import codecs import os import tensorflow as tf import numpy as np from",
"should be length 1 each time due to the cycling of the while",
"[] for source_sent, target_sent in zip(source_sents, target_sents): x = [de2idx.get(word, 1) for word",
"(source_sent + u\" </S>\").split()] # 1: OOV, </S>: End of Text y =",
"Targets = load_test_data() \"\"\" x_list, y_list, Sources, Targets = [], [], [], []",
"</S>\").split()] if max(len(x), len(y)) <=hp.maxlen: x_list.append(np.array(x)) y_list.append(np.array(y)) Sources.append(source_sent) Targets.append(target_sent) # Pad X =",
"y_list, Sources, Targets = [], [], [], [] for source_sent, target_sent in zip(source_sents,",
"xlist.append(np.array(xval)) X = np.zeros([len(xlist), hp.maxlen], np.int32) for i, xi in enumerate(xlist): X[i] =",
"list_of_refs, hypotheses = [], [] for i in range(len(X) // hp.batch_size): ### Get",
"data_load import load_test_data, load_de_vocab, load_en_vocab from train import Graph #from nltk.translate.bleu_score import corpus_bleu",
"Autoregressive inference preds = np.zeros((hp.batch_size, hp.maxlen), np.int32) for j in range(hp.maxlen): #print(\"j: \"",
"j] = _preds[:, j] #print(pred) # pred should be length 1 each time",
"in range(hp.maxlen): #print(\"j: \" + str(j)) _preds = sess.run(g.preds, {g.x: x, g.y: preds})",
"enumerate(zip(x_list, y_list)): X[i] = np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant', constant_values=(0, 0)) \"\"\" en2idx, idx2en",
"= np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant', constant_values=(0, 0)) \"\"\" en2idx, idx2en = load_en_vocab() #",
"xi in enumerate(xlist): X[i] = np.lib.pad(x, [0, hp.maxlen - len(x)], 'constant', constant_values=(0, 0))",
"= np.zeros((hp.batch_size, hp.maxlen), np.int32) for j in range(hp.maxlen): #print(\"j: \" + str(j)) _preds",
"(target_sent + u\" </S>\").split()] if max(len(x), len(y)) <=hp.maxlen: x_list.append(np.array(x)) y_list.append(np.array(y)) Sources.append(source_sent) Targets.append(target_sent) #",
"source_sent, target_sent in zip(source_sents, target_sents): x = [de2idx.get(word, 1) for word in (source_sent",
"1 each time due to the cycling of the while loop in main",
"= [en2idx.get(word, 1) for word in (target_sent + u\" </S>\").split()] if max(len(x), len(y))",
"1: OOV, </S>: End of Text y = [en2idx.get(word, 1) for word in",
"</S>\").split()] # 1: OOV, </S>: End of Text y = [en2idx.get(word, 1) for",
"j] #print(pred) # pred should be length 1 each time due to the",
"y) in enumerate(zip(x_list, y_list)): X[i] = np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant', constant_values=(0, 0)) \"\"\"",
"np.int32) Y = np.zeros([len(y_list), hp.maxlen], np.int32) for i, (x, y) in enumerate(zip(x_list, y_list)):",
"in main for pred in preds: got = \" \".join(idx2en[idx] for idx in",
"word in (source_sent + u\" </S>\").split()] # 1: OOV, </S>: End of Text",
"nltk.translate.bleu_score import corpus_bleu def eval(): # Load graph g = Graph(is_training=False) print(\"Graph loaded\")",
"codecs import os import tensorflow as tf import numpy as np from hyperparams",
"\"\"\" x_list, y_list, Sources, Targets = [], [], [], [] for source_sent, target_sent",
"u\" </S>\").split()] if (len(xval) <= hp.maxlen): xlist.append(np.array(xval)) X = np.zeros([len(xlist), hp.maxlen], np.int32) for",
"Targets = [], [], [], [] for source_sent, target_sent in zip(source_sents, target_sents): x",
"X = np.zeros([len(xlist), hp.maxlen], np.int32) for i, xi in enumerate(xlist): X[i] = np.lib.pad(x,",
"x, g.y: preds}) preds[:, j] = _preds[:, j] #print(pred) # pred should be",
"u\" </S>\").split()] if max(len(x), len(y)) <=hp.maxlen: x_list.append(np.array(x)) y_list.append(np.array(y)) Sources.append(source_sent) Targets.append(target_sent) # Pad X",
"## Restore parameters sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)) while(True): prompt = raw_input() xlist = [] xval",
"= Graph(is_training=False) print(\"Graph loaded\") # Load data # X, Sources, Targets = load_test_data()",
"g = Graph(is_training=False) print(\"Graph loaded\") # Load data # X, Sources, Targets =",
"0)) list_of_refs, hypotheses = [], [] for i in range(len(X) // hp.batch_size): ###",
"from train import Graph #from nltk.translate.bleu_score import corpus_bleu def eval(): # Load graph",
"# X, Sources, Targets = load_test_data() \"\"\" x_list, y_list, Sources, Targets = [],",
"load_en_vocab() # Start session with g.graph.as_default(): sv = tf.train.Supervisor() with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:",
"tf import numpy as np from hyperparams import Hyperparams as hp from data_load",
"y_list)): X[i] = np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant', constant_values=(0, 0)) \"\"\" en2idx, idx2en =",
"(len(xval) <= hp.maxlen): xlist.append(np.array(xval)) X = np.zeros([len(xlist), hp.maxlen], np.int32) for i, xi in",
"preds}) preds[:, j] = _preds[:, j] #print(pred) # pred should be length 1",
"= _preds[:, j] #print(pred) # pred should be length 1 each time due",
"sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: ## Restore parameters sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)) while(True): prompt = raw_input() xlist",
"prompt = raw_input() xlist = [] xval = [en2idx.get(word, 1) for word in",
"(x, y) in enumerate(zip(x_list, y_list)): X[i] = np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant', constant_values=(0, 0))",
"Restore parameters sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)) while(True): prompt = raw_input() xlist = [] xval =",
"if max(len(x), len(y)) <=hp.maxlen: x_list.append(np.array(x)) y_list.append(np.array(y)) Sources.append(source_sent) Targets.append(target_sent) # Pad X = np.zeros([len(x_list),",
"= [en2idx.get(word, 1) for word in (target_sent + u\" </S>\").split()] if (len(xval) <=",
"hyperparams import Hyperparams as hp from data_load import load_test_data, load_de_vocab, load_en_vocab from train",
"= np.lib.pad(x, [0, hp.maxlen - len(x)], 'constant', constant_values=(0, 0)) list_of_refs, hypotheses = [],",
"in (target_sent + u\" </S>\").split()] if max(len(x), len(y)) <=hp.maxlen: x_list.append(np.array(x)) y_list.append(np.array(y)) Sources.append(source_sent) Targets.append(target_sent)",
"with g.graph.as_default(): sv = tf.train.Supervisor() with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: ## Restore parameters sv.saver.restore(sess,",
"0)) \"\"\" en2idx, idx2en = load_en_vocab() # Start session with g.graph.as_default(): sv =",
"session with g.graph.as_default(): sv = tf.train.Supervisor() with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: ## Restore parameters",
"Graph #from nltk.translate.bleu_score import corpus_bleu def eval(): # Load graph g = Graph(is_training=False)",
"Start session with g.graph.as_default(): sv = tf.train.Supervisor() with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: ## Restore",
"raw_input() xlist = [] xval = [en2idx.get(word, 1) for word in (target_sent +",
"[], [], [] for source_sent, target_sent in zip(source_sents, target_sents): x = [de2idx.get(word, 1)",
"{g.x: x, g.y: preds}) preds[:, j] = _preds[:, j] #print(pred) # pred should",
"graph g = Graph(is_training=False) print(\"Graph loaded\") # Load data # X, Sources, Targets",
"= [de2idx.get(word, 1) for word in (source_sent + u\" </S>\").split()] # 1: OOV,",
"# Pad X = np.zeros([len(x_list), hp.maxlen], np.int32) Y = np.zeros([len(y_list), hp.maxlen], np.int32) for",
"= np.zeros([len(x_list), hp.maxlen], np.int32) Y = np.zeros([len(y_list), hp.maxlen], np.int32) for i, (x, y)",
"in (target_sent + u\" </S>\").split()] if (len(xval) <= hp.maxlen): xlist.append(np.array(xval)) X = np.zeros([len(xlist),",
"[], [], [], [] for source_sent, target_sent in zip(source_sents, target_sents): x = [de2idx.get(word,",
"[de2idx.get(word, 1) for word in (source_sent + u\" </S>\").split()] # 1: OOV, </S>:",
"np.lib.pad(x, [0, hp.maxlen - len(x)], 'constant', constant_values=(0, 0)) list_of_refs, hypotheses = [], []",
"sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)) while(True): prompt = raw_input() xlist = [] xval = [en2idx.get(word, 1)",
"np from hyperparams import Hyperparams as hp from data_load import load_test_data, load_de_vocab, load_en_vocab",
"as hp from data_load import load_test_data, load_de_vocab, load_en_vocab from train import Graph #from",
"hp.maxlen): xlist.append(np.array(xval)) X = np.zeros([len(xlist), hp.maxlen], np.int32) for i, xi in enumerate(xlist): X[i]",
"in enumerate(xlist): X[i] = np.lib.pad(x, [0, hp.maxlen - len(x)], 'constant', constant_values=(0, 0)) list_of_refs,",
"+ str(j)) _preds = sess.run(g.preds, {g.x: x, g.y: preds}) preds[:, j] = _preds[:,",
"load_en_vocab from train import Graph #from nltk.translate.bleu_score import corpus_bleu def eval(): # Load",
"Hyperparams as hp from data_load import load_test_data, load_de_vocab, load_en_vocab from train import Graph",
"</S>: End of Text y = [en2idx.get(word, 1) for word in (target_sent +",
"1) for word in (target_sent + u\" </S>\").split()] if max(len(x), len(y)) <=hp.maxlen: x_list.append(np.array(x))",
"= [], [] for i in range(len(X) // hp.batch_size): ### Get mini-batches x",
"range(hp.maxlen): #print(\"j: \" + str(j)) _preds = sess.run(g.preds, {g.x: x, g.y: preds}) preds[:,",
"tf.train.Supervisor() with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: ## Restore parameters sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)) while(True): prompt =",
"for word in (target_sent + u\" </S>\").split()] if max(len(x), len(y)) <=hp.maxlen: x_list.append(np.array(x)) y_list.append(np.array(y))",
"June 2017 by <NAME>. <EMAIL>. https://www.github.com/kyubyong/transformer ''' from __future__ import print_function import codecs",
"pred should be length 1 each time due to the cycling of the",
"Text y = [en2idx.get(word, 1) for word in (target_sent + u\" </S>\").split()] if",
"word in (target_sent + u\" </S>\").split()] if max(len(x), len(y)) <=hp.maxlen: x_list.append(np.array(x)) y_list.append(np.array(y)) Sources.append(source_sent)",
"X = np.zeros([len(x_list), hp.maxlen], np.int32) Y = np.zeros([len(y_list), hp.maxlen], np.int32) for i, (x,",
"= load_en_vocab() # Start session with g.graph.as_default(): sv = tf.train.Supervisor() with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as",
"the cycling of the while loop in main for pred in preds: got",
"1) for word in (target_sent + u\" </S>\").split()] if (len(xval) <= hp.maxlen): xlist.append(np.array(xval))",
"X[i*hp.batch_size: (i+1)*hp.batch_size] prompt = raw_input() ### Autoregressive inference preds = np.zeros((hp.batch_size, hp.maxlen), np.int32)",
"mini-batches x = X[i*hp.batch_size: (i+1)*hp.batch_size] prompt = raw_input() ### Autoregressive inference preds =",
"each time due to the cycling of the while loop in main for",
"= [] xval = [en2idx.get(word, 1) for word in (target_sent + u\" </S>\").split()]",
"X[i] = np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant', constant_values=(0, 0)) \"\"\" en2idx, idx2en = load_en_vocab()",
"''' June 2017 by <NAME>. <EMAIL>. https://www.github.com/kyubyong/transformer ''' from __future__ import print_function import",
"for i, xi in enumerate(xlist): X[i] = np.lib.pad(x, [0, hp.maxlen - len(x)], 'constant',",
"hp.maxlen), np.int32) for j in range(hp.maxlen): #print(\"j: \" + str(j)) _preds = sess.run(g.preds,",
"End of Text y = [en2idx.get(word, 1) for word in (target_sent + u\"",
"<NAME>. <EMAIL>. https://www.github.com/kyubyong/transformer ''' from __future__ import print_function import codecs import os import",
"got = \" \".join(idx2en[idx] for idx in pred).split(\"</S>\")[0].strip() #return got print(got) if __name__",
"_preds = sess.run(g.preds, {g.x: x, g.y: preds}) preds[:, j] = _preds[:, j] #print(pred)",
"idx2en = load_en_vocab() # Start session with g.graph.as_default(): sv = tf.train.Supervisor() with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True))",
"Graph(is_training=False) print(\"Graph loaded\") # Load data # X, Sources, Targets = load_test_data() \"\"\"",
"length 1 each time due to the cycling of the while loop in",
"hp.maxlen], np.int32) for i, (x, y) in enumerate(zip(x_list, y_list)): X[i] = np.lib.pad(x, [0,",
"'constant', constant_values=(0, 0)) \"\"\" en2idx, idx2en = load_en_vocab() # Start session with g.graph.as_default():",
"max(len(x), len(y)) <=hp.maxlen: x_list.append(np.array(x)) y_list.append(np.array(y)) Sources.append(source_sent) Targets.append(target_sent) # Pad X = np.zeros([len(x_list), hp.maxlen],",
"+ u\" </S>\").split()] # 1: OOV, </S>: End of Text y = [en2idx.get(word,",
"to the cycling of the while loop in main for pred in preds:",
"if (len(xval) <= hp.maxlen): xlist.append(np.array(xval)) X = np.zeros([len(xlist), hp.maxlen], np.int32) for i, xi",
"be length 1 each time due to the cycling of the while loop",
"Y = np.zeros([len(y_list), hp.maxlen], np.int32) for i, (x, y) in enumerate(zip(x_list, y_list)): X[i]",
"+ u\" </S>\").split()] if (len(xval) <= hp.maxlen): xlist.append(np.array(xval)) X = np.zeros([len(xlist), hp.maxlen], np.int32)",
"in (source_sent + u\" </S>\").split()] # 1: OOV, </S>: End of Text y",
"g.y: preds}) preds[:, j] = _preds[:, j] #print(pred) # pred should be length",
"load_test_data() \"\"\" x_list, y_list, Sources, Targets = [], [], [], [] for source_sent,",
"[] for i in range(len(X) // hp.batch_size): ### Get mini-batches x = X[i*hp.batch_size:",
"in zip(source_sents, target_sents): x = [de2idx.get(word, 1) for word in (source_sent + u\"",
"data # X, Sources, Targets = load_test_data() \"\"\" x_list, y_list, Sources, Targets =",
"len(x)], 'constant', constant_values=(0, 0)) list_of_refs, hypotheses = [], [] for i in range(len(X)",
"import load_test_data, load_de_vocab, load_en_vocab from train import Graph #from nltk.translate.bleu_score import corpus_bleu def",
"i, (x, y) in enumerate(zip(x_list, y_list)): X[i] = np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant', constant_values=(0,",
"[en2idx.get(word, 1) for word in (target_sent + u\" </S>\").split()] if (len(xval) <= hp.maxlen):",
"<= hp.maxlen): xlist.append(np.array(xval)) X = np.zeros([len(xlist), hp.maxlen], np.int32) for i, xi in enumerate(xlist):",
"i, xi in enumerate(xlist): X[i] = np.lib.pad(x, [0, hp.maxlen - len(x)], 'constant', constant_values=(0,",
"eval(): # Load graph g = Graph(is_training=False) print(\"Graph loaded\") # Load data #",
"(target_sent + u\" </S>\").split()] if (len(xval) <= hp.maxlen): xlist.append(np.array(xval)) X = np.zeros([len(xlist), hp.maxlen],",
"due to the cycling of the while loop in main for pred in",
"numpy as np from hyperparams import Hyperparams as hp from data_load import load_test_data,",
"// hp.batch_size): ### Get mini-batches x = X[i*hp.batch_size: (i+1)*hp.batch_size] prompt = raw_input() ###",
"from __future__ import print_function import codecs import os import tensorflow as tf import",
"OOV, </S>: End of Text y = [en2idx.get(word, 1) for word in (target_sent",
"import corpus_bleu def eval(): # Load graph g = Graph(is_training=False) print(\"Graph loaded\") #",
"Load data # X, Sources, Targets = load_test_data() \"\"\" x_list, y_list, Sources, Targets",
"= tf.train.Supervisor() with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: ## Restore parameters sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir)) while(True): prompt",
"hp.maxlen], np.int32) Y = np.zeros([len(y_list), hp.maxlen], np.int32) for i, (x, y) in enumerate(zip(x_list,",
"main for pred in preds: got = \" \".join(idx2en[idx] for idx in pred).split(\"</S>\")[0].strip()",
"in enumerate(zip(x_list, y_list)): X[i] = np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant', constant_values=(0, 0)) \"\"\" en2idx,",
"inference preds = np.zeros((hp.batch_size, hp.maxlen), np.int32) for j in range(hp.maxlen): #print(\"j: \" +",
"- len(x)], 'constant', constant_values=(0, 0)) list_of_refs, hypotheses = [], [] for i in",
"#/usr/bin/python2 ''' June 2017 by <NAME>. <EMAIL>. https://www.github.com/kyubyong/transformer ''' from __future__ import print_function",
"range(len(X) // hp.batch_size): ### Get mini-batches x = X[i*hp.batch_size: (i+1)*hp.batch_size] prompt = raw_input()",
"target_sent in zip(source_sents, target_sents): x = [de2idx.get(word, 1) for word in (source_sent +",
"xval = [en2idx.get(word, 1) for word in (target_sent + u\" </S>\").split()] if (len(xval)",
"np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant', constant_values=(0, 0)) \"\"\" en2idx, idx2en = load_en_vocab() # Start",
"j in range(hp.maxlen): #print(\"j: \" + str(j)) _preds = sess.run(g.preds, {g.x: x, g.y:",
"+ u\" </S>\").split()] if max(len(x), len(y)) <=hp.maxlen: x_list.append(np.array(x)) y_list.append(np.array(y)) Sources.append(source_sent) Targets.append(target_sent) # Pad"
] |
[
"to the underlaying data values in the event. \"\"\" __slots__ = ['request_type', 'path',",
"= status_code @property def content_type(self): return { key.lower(): value for key, value in",
"as long as it has access to the underlaying data values in the",
"from the body. This value should # only be set if the Content-Type",
"class Response: __slots__ = ['body', 'headers', 'status_code'] def __init__(self, body='', headers=None, status_code=codes.ok): self.body",
"json.loads(self.body) return self._json_body class Response: __slots__ = ['body', 'headers', 'status_code'] def __init__(self, body='',",
"long as it has access to the underlaying data values in the event.",
"path, resource, query_params, headers, uri_params, method, body, context, event): self.request_type = request_type self.path",
"payload. \"\"\" if self.headers.get('content-type', '').startswith('application/json'): if self._json_body is None: self._json_body = json.loads(self.body) return",
"from minik.status_codes import codes class MinikRequest: \"\"\" Simple wrapper of the data object",
"a view does not need to be concerned with the inner representation of",
"{ key.lower(): value for key, value in self.headers.items() }.get('content-type') def to_dict(self, binary_types=None): return",
"self._json_body = json.loads(self.body) return self._json_body class Response: __slots__ = ['body', 'headers', 'status_code'] def",
"set if the Content-Type header is application/json, # which is the default content",
"uri_params self.method = method self.body = body self.aws_context = context self.aws_event = event",
"header is application/json, # which is the default content type. self._json_body = None",
"self.request_type = request_type self.path = path self.resource = resource self.query_params = query_params self.headers",
"= body self.headers = headers or {} self.status_code = status_code @property def content_type(self):",
"be concerned with the inner representation of the APIGateway's event as long as",
"wrapper of the data object received from API Gateway. This object will parse",
"or {} self.status_code = status_code @property def content_type(self): return { key.lower(): value for",
"'resource', 'query_params', 'headers', 'uri_params', 'method', 'body', '_json_body', 'aws_context', 'aws_event'] def __init__(self, request_type, path,",
"body, context, event): self.request_type = request_type self.path = path self.resource = resource self.query_params",
"key.lower(): value for key, value in self.headers.items() }.get('content-type') def to_dict(self, binary_types=None): return {",
"'headers', 'status_code'] def __init__(self, body='', headers=None, status_code=codes.ok): self.body = body self.headers = headers",
"This object will parse a given API gateway event and it will transform",
"data values in the event. \"\"\" __slots__ = ['request_type', 'path', 'resource', 'query_params', 'headers',",
"if self._json_body is None: self._json_body = json.loads(self.body) return self._json_body class Response: __slots__ =",
"received from API Gateway. This object will parse a given API gateway event",
"given API gateway event and it will transform it into a more user",
"key, value in self.headers.items() }.get('content-type') def to_dict(self, binary_types=None): return { 'headers': self.headers, 'statusCode':",
"class MinikRequest: \"\"\" Simple wrapper of the data object received from API Gateway.",
"@property def content_type(self): return { key.lower(): value for key, value in self.headers.items() }.get('content-type')",
"The idea is that a view does not need to be concerned with",
"user friendly object to operate on. The idea is that a view does",
"= context self.aws_event = event # The parsed JSON from the body. This",
"return self._json_body class Response: __slots__ = ['body', 'headers', 'status_code'] def __init__(self, body='', headers=None,",
"a given API gateway event and it will transform it into a more",
"'body', '_json_body', 'aws_context', 'aws_event'] def __init__(self, request_type, path, resource, query_params, headers, uri_params, method,",
"status_code @property def content_type(self): return { key.lower(): value for key, value in self.headers.items()",
"self.status_code = status_code @property def content_type(self): return { key.lower(): value for key, value",
"None: self._json_body = json.loads(self.body) return self._json_body class Response: __slots__ = ['body', 'headers', 'status_code']",
"content_type(self): return { key.lower(): value for key, value in self.headers.items() }.get('content-type') def to_dict(self,",
"__slots__ = ['body', 'headers', 'status_code'] def __init__(self, body='', headers=None, status_code=codes.ok): self.body = body",
"representation of the APIGateway's event as long as it has access to the",
"into a more user friendly object to operate on. The idea is that",
"is None: self._json_body = json.loads(self.body) return self._json_body class Response: __slots__ = ['body', 'headers',",
"Response: __slots__ = ['body', 'headers', 'status_code'] def __init__(self, body='', headers=None, status_code=codes.ok): self.body =",
"has access to the underlaying data values in the event. \"\"\" __slots__ =",
"json_body(self): \"\"\" Lazy loading/parsing of the json payload. \"\"\" if self.headers.get('content-type', '').startswith('application/json'): if",
"gateway event and it will transform it into a more user friendly object",
"concerned with the inner representation of the APIGateway's event as long as it",
"# The parsed JSON from the body. This value should # only be",
"of the APIGateway's event as long as it has access to the underlaying",
"the underlaying data values in the event. \"\"\" __slots__ = ['request_type', 'path', 'resource',",
"'path', 'resource', 'query_params', 'headers', 'uri_params', 'method', 'body', '_json_body', 'aws_context', 'aws_event'] def __init__(self, request_type,",
"self._json_body class Response: __slots__ = ['body', 'headers', 'status_code'] def __init__(self, body='', headers=None, status_code=codes.ok):",
"is that a view does not need to be concerned with the inner",
"loading/parsing of the json payload. \"\"\" if self.headers.get('content-type', '').startswith('application/json'): if self._json_body is None:",
"a more user friendly object to operate on. The idea is that a",
"type. self._json_body = None @property def json_body(self): \"\"\" Lazy loading/parsing of the json",
"}.get('content-type') def to_dict(self, binary_types=None): return { 'headers': self.headers, 'statusCode': self.status_code, 'body': self.body }",
"the APIGateway's event as long as it has access to the underlaying data",
"\"\"\" Simple wrapper of the data object received from API Gateway. This object",
"'aws_context', 'aws_event'] def __init__(self, request_type, path, resource, query_params, headers, uri_params, method, body, context,",
"be set if the Content-Type header is application/json, # which is the default",
"method, body, context, event): self.request_type = request_type self.path = path self.resource = resource",
"= resource self.query_params = query_params self.headers = headers self.uri_params = uri_params self.method =",
"which is the default content type. self._json_body = None @property def json_body(self): \"\"\"",
"JSON from the body. This value should # only be set if the",
"Content-Type header is application/json, # which is the default content type. self._json_body =",
"API Gateway. This object will parse a given API gateway event and it",
"in self.headers.items() }.get('content-type') def to_dict(self, binary_types=None): return { 'headers': self.headers, 'statusCode': self.status_code, 'body':",
"= ['request_type', 'path', 'resource', 'query_params', 'headers', 'uri_params', 'method', 'body', '_json_body', 'aws_context', 'aws_event'] def",
"'uri_params', 'method', 'body', '_json_body', 'aws_context', 'aws_event'] def __init__(self, request_type, path, resource, query_params, headers,",
"value should # only be set if the Content-Type header is application/json, #",
"= headers or {} self.status_code = status_code @property def content_type(self): return { key.lower():",
"self.headers.items() }.get('content-type') def to_dict(self, binary_types=None): return { 'headers': self.headers, 'statusCode': self.status_code, 'body': self.body",
"def __init__(self, body='', headers=None, status_code=codes.ok): self.body = body self.headers = headers or {}",
"that a view does not need to be concerned with the inner representation",
"# which is the default content type. self._json_body = None @property def json_body(self):",
"of the json payload. \"\"\" if self.headers.get('content-type', '').startswith('application/json'): if self._json_body is None: self._json_body",
"should # only be set if the Content-Type header is application/json, # which",
"the default content type. self._json_body = None @property def json_body(self): \"\"\" Lazy loading/parsing",
"'method', 'body', '_json_body', 'aws_context', 'aws_event'] def __init__(self, request_type, path, resource, query_params, headers, uri_params,",
"value for key, value in self.headers.items() }.get('content-type') def to_dict(self, binary_types=None): return { 'headers':",
"event and it will transform it into a more user friendly object to",
"Simple wrapper of the data object received from API Gateway. This object will",
"as it has access to the underlaying data values in the event. \"\"\"",
"not need to be concerned with the inner representation of the APIGateway's event",
"= query_params self.headers = headers self.uri_params = uri_params self.method = method self.body =",
"= json.loads(self.body) return self._json_body class Response: __slots__ = ['body', 'headers', 'status_code'] def __init__(self,",
"body self.aws_context = context self.aws_event = event # The parsed JSON from the",
"self.path = path self.resource = resource self.query_params = query_params self.headers = headers self.uri_params",
"__init__(self, request_type, path, resource, query_params, headers, uri_params, method, body, context, event): self.request_type =",
"context self.aws_event = event # The parsed JSON from the body. This value",
"= None @property def json_body(self): \"\"\" Lazy loading/parsing of the json payload. \"\"\"",
"self.aws_context = context self.aws_event = event # The parsed JSON from the body.",
"object received from API Gateway. This object will parse a given API gateway",
"import codes class MinikRequest: \"\"\" Simple wrapper of the data object received from",
"<filename>minik/models.py import json from minik.status_codes import codes class MinikRequest: \"\"\" Simple wrapper of",
"access to the underlaying data values in the event. \"\"\" __slots__ = ['request_type',",
"parsed JSON from the body. This value should # only be set if",
"self.body = body self.headers = headers or {} self.status_code = status_code @property def",
"self.method = method self.body = body self.aws_context = context self.aws_event = event #",
"request_type self.path = path self.resource = resource self.query_params = query_params self.headers = headers",
"self.body = body self.aws_context = context self.aws_event = event # The parsed JSON",
"object will parse a given API gateway event and it will transform it",
"json payload. \"\"\" if self.headers.get('content-type', '').startswith('application/json'): if self._json_body is None: self._json_body = json.loads(self.body)",
"self.headers = headers self.uri_params = uri_params self.method = method self.body = body self.aws_context",
"is the default content type. self._json_body = None @property def json_body(self): \"\"\" Lazy",
"will transform it into a more user friendly object to operate on. The",
"resource self.query_params = query_params self.headers = headers self.uri_params = uri_params self.method = method",
"def json_body(self): \"\"\" Lazy loading/parsing of the json payload. \"\"\" if self.headers.get('content-type', '').startswith('application/json'):",
"@property def json_body(self): \"\"\" Lazy loading/parsing of the json payload. \"\"\" if self.headers.get('content-type',",
"'aws_event'] def __init__(self, request_type, path, resource, query_params, headers, uri_params, method, body, context, event):",
"__init__(self, body='', headers=None, status_code=codes.ok): self.body = body self.headers = headers or {} self.status_code",
"= ['body', 'headers', 'status_code'] def __init__(self, body='', headers=None, status_code=codes.ok): self.body = body self.headers",
"context, event): self.request_type = request_type self.path = path self.resource = resource self.query_params =",
"event # The parsed JSON from the body. This value should # only",
"for key, value in self.headers.items() }.get('content-type') def to_dict(self, binary_types=None): return { 'headers': self.headers,",
"view does not need to be concerned with the inner representation of the",
"does not need to be concerned with the inner representation of the APIGateway's",
"need to be concerned with the inner representation of the APIGateway's event as",
"data object received from API Gateway. This object will parse a given API",
"friendly object to operate on. The idea is that a view does not",
"underlaying data values in the event. \"\"\" __slots__ = ['request_type', 'path', 'resource', 'query_params',",
"default content type. self._json_body = None @property def json_body(self): \"\"\" Lazy loading/parsing of",
"'').startswith('application/json'): if self._json_body is None: self._json_body = json.loads(self.body) return self._json_body class Response: __slots__",
"def __init__(self, request_type, path, resource, query_params, headers, uri_params, method, body, context, event): self.request_type",
"with the inner representation of the APIGateway's event as long as it has",
"API gateway event and it will transform it into a more user friendly",
"= request_type self.path = path self.resource = resource self.query_params = query_params self.headers =",
"= method self.body = body self.aws_context = context self.aws_event = event # The",
"self.headers = headers or {} self.status_code = status_code @property def content_type(self): return {",
"it into a more user friendly object to operate on. The idea is",
"the data object received from API Gateway. This object will parse a given",
"'headers', 'uri_params', 'method', 'body', '_json_body', 'aws_context', 'aws_event'] def __init__(self, request_type, path, resource, query_params,",
"and it will transform it into a more user friendly object to operate",
"body self.headers = headers or {} self.status_code = status_code @property def content_type(self): return",
"on. The idea is that a view does not need to be concerned",
"def content_type(self): return { key.lower(): value for key, value in self.headers.items() }.get('content-type') def",
"application/json, # which is the default content type. self._json_body = None @property def",
"from API Gateway. This object will parse a given API gateway event and",
"APIGateway's event as long as it has access to the underlaying data values",
"resource, query_params, headers, uri_params, method, body, context, event): self.request_type = request_type self.path =",
"inner representation of the APIGateway's event as long as it has access to",
"query_params self.headers = headers self.uri_params = uri_params self.method = method self.body = body",
"event as long as it has access to the underlaying data values in",
"path self.resource = resource self.query_params = query_params self.headers = headers self.uri_params = uri_params",
"self._json_body is None: self._json_body = json.loads(self.body) return self._json_body class Response: __slots__ = ['body',",
"__slots__ = ['request_type', 'path', 'resource', 'query_params', 'headers', 'uri_params', 'method', 'body', '_json_body', 'aws_context', 'aws_event']",
"the body. This value should # only be set if the Content-Type header",
"codes class MinikRequest: \"\"\" Simple wrapper of the data object received from API",
"\"\"\" if self.headers.get('content-type', '').startswith('application/json'): if self._json_body is None: self._json_body = json.loads(self.body) return self._json_body",
"'_json_body', 'aws_context', 'aws_event'] def __init__(self, request_type, path, resource, query_params, headers, uri_params, method, body,",
"\"\"\" __slots__ = ['request_type', 'path', 'resource', 'query_params', 'headers', 'uri_params', 'method', 'body', '_json_body', 'aws_context',",
"= uri_params self.method = method self.body = body self.aws_context = context self.aws_event =",
"the json payload. \"\"\" if self.headers.get('content-type', '').startswith('application/json'): if self._json_body is None: self._json_body =",
"self.query_params = query_params self.headers = headers self.uri_params = uri_params self.method = method self.body",
"status_code=codes.ok): self.body = body self.headers = headers or {} self.status_code = status_code @property",
"value in self.headers.items() }.get('content-type') def to_dict(self, binary_types=None): return { 'headers': self.headers, 'statusCode': self.status_code,",
"in the event. \"\"\" __slots__ = ['request_type', 'path', 'resource', 'query_params', 'headers', 'uri_params', 'method',",
"headers self.uri_params = uri_params self.method = method self.body = body self.aws_context = context",
"query_params, headers, uri_params, method, body, context, event): self.request_type = request_type self.path = path",
"of the data object received from API Gateway. This object will parse a",
"None @property def json_body(self): \"\"\" Lazy loading/parsing of the json payload. \"\"\" if",
"The parsed JSON from the body. This value should # only be set",
"This value should # only be set if the Content-Type header is application/json,",
"['body', 'headers', 'status_code'] def __init__(self, body='', headers=None, status_code=codes.ok): self.body = body self.headers =",
"\"\"\" Lazy loading/parsing of the json payload. \"\"\" if self.headers.get('content-type', '').startswith('application/json'): if self._json_body",
"the Content-Type header is application/json, # which is the default content type. self._json_body",
"Lazy loading/parsing of the json payload. \"\"\" if self.headers.get('content-type', '').startswith('application/json'): if self._json_body is",
"'query_params', 'headers', 'uri_params', 'method', 'body', '_json_body', 'aws_context', 'aws_event'] def __init__(self, request_type, path, resource,",
"method self.body = body self.aws_context = context self.aws_event = event # The parsed",
"idea is that a view does not need to be concerned with the",
"content type. self._json_body = None @property def json_body(self): \"\"\" Lazy loading/parsing of the",
"headers=None, status_code=codes.ok): self.body = body self.headers = headers or {} self.status_code = status_code",
"is application/json, # which is the default content type. self._json_body = None @property",
"self.resource = resource self.query_params = query_params self.headers = headers self.uri_params = uri_params self.method",
"body. This value should # only be set if the Content-Type header is",
"= body self.aws_context = context self.aws_event = event # The parsed JSON from",
"it will transform it into a more user friendly object to operate on.",
"body='', headers=None, status_code=codes.ok): self.body = body self.headers = headers or {} self.status_code =",
"parse a given API gateway event and it will transform it into a",
"to be concerned with the inner representation of the APIGateway's event as long",
"request_type, path, resource, query_params, headers, uri_params, method, body, context, event): self.request_type = request_type",
"event): self.request_type = request_type self.path = path self.resource = resource self.query_params = query_params",
"self.uri_params = uri_params self.method = method self.body = body self.aws_context = context self.aws_event",
"# only be set if the Content-Type header is application/json, # which is",
"more user friendly object to operate on. The idea is that a view",
"the inner representation of the APIGateway's event as long as it has access",
"the event. \"\"\" __slots__ = ['request_type', 'path', 'resource', 'query_params', 'headers', 'uri_params', 'method', 'body',",
"operate on. The idea is that a view does not need to be",
"headers or {} self.status_code = status_code @property def content_type(self): return { key.lower(): value",
"object to operate on. The idea is that a view does not need",
"uri_params, method, body, context, event): self.request_type = request_type self.path = path self.resource =",
"['request_type', 'path', 'resource', 'query_params', 'headers', 'uri_params', 'method', 'body', '_json_body', 'aws_context', 'aws_event'] def __init__(self,",
"will parse a given API gateway event and it will transform it into",
"return { key.lower(): value for key, value in self.headers.items() }.get('content-type') def to_dict(self, binary_types=None):",
"= path self.resource = resource self.query_params = query_params self.headers = headers self.uri_params =",
"= headers self.uri_params = uri_params self.method = method self.body = body self.aws_context =",
"MinikRequest: \"\"\" Simple wrapper of the data object received from API Gateway. This",
"if the Content-Type header is application/json, # which is the default content type.",
"json from minik.status_codes import codes class MinikRequest: \"\"\" Simple wrapper of the data",
"headers, uri_params, method, body, context, event): self.request_type = request_type self.path = path self.resource",
"self.headers.get('content-type', '').startswith('application/json'): if self._json_body is None: self._json_body = json.loads(self.body) return self._json_body class Response:",
"'status_code'] def __init__(self, body='', headers=None, status_code=codes.ok): self.body = body self.headers = headers or",
"Gateway. This object will parse a given API gateway event and it will",
"self._json_body = None @property def json_body(self): \"\"\" Lazy loading/parsing of the json payload.",
"values in the event. \"\"\" __slots__ = ['request_type', 'path', 'resource', 'query_params', 'headers', 'uri_params',",
"only be set if the Content-Type header is application/json, # which is the",
"if self.headers.get('content-type', '').startswith('application/json'): if self._json_body is None: self._json_body = json.loads(self.body) return self._json_body class",
"import json from minik.status_codes import codes class MinikRequest: \"\"\" Simple wrapper of the",
"minik.status_codes import codes class MinikRequest: \"\"\" Simple wrapper of the data object received",
"{} self.status_code = status_code @property def content_type(self): return { key.lower(): value for key,",
"self.aws_event = event # The parsed JSON from the body. This value should",
"= event # The parsed JSON from the body. This value should #",
"it has access to the underlaying data values in the event. \"\"\" __slots__",
"to operate on. The idea is that a view does not need to",
"event. \"\"\" __slots__ = ['request_type', 'path', 'resource', 'query_params', 'headers', 'uri_params', 'method', 'body', '_json_body',",
"transform it into a more user friendly object to operate on. The idea"
] |
[
"\"forecastsite.settings\") try: from django.core.management import execute_from_command_line except ImportError: try: import django except ImportError:",
"#!/usr/bin/env python import os import sys if __name__ == \"__main__\": os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"forecastsite.settings\") try:",
"__name__ == \"__main__\": os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"forecastsite.settings\") try: from django.core.management import execute_from_command_line except ImportError: try:",
"== \"__main__\": os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"forecastsite.settings\") try: from django.core.management import execute_from_command_line except ImportError: try: import",
"except ImportError: try: import django except ImportError: raise ImportError( \"Nie można zaimportować Django\"",
"import django except ImportError: raise ImportError( \"Nie można zaimportować Django\" ) raise execute_from_command_line(sys.argv)",
"os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"forecastsite.settings\") try: from django.core.management import execute_from_command_line except ImportError: try: import django except",
"import sys if __name__ == \"__main__\": os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"forecastsite.settings\") try: from django.core.management import execute_from_command_line",
"from django.core.management import execute_from_command_line except ImportError: try: import django except ImportError: raise ImportError(",
"execute_from_command_line except ImportError: try: import django except ImportError: raise ImportError( \"Nie można zaimportować",
"django.core.management import execute_from_command_line except ImportError: try: import django except ImportError: raise ImportError( \"Nie",
"ImportError: try: import django except ImportError: raise ImportError( \"Nie można zaimportować Django\" )",
"\"__main__\": os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"forecastsite.settings\") try: from django.core.management import execute_from_command_line except ImportError: try: import django",
"sys if __name__ == \"__main__\": os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"forecastsite.settings\") try: from django.core.management import execute_from_command_line except",
"try: from django.core.management import execute_from_command_line except ImportError: try: import django except ImportError: raise",
"import os import sys if __name__ == \"__main__\": os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"forecastsite.settings\") try: from django.core.management",
"python import os import sys if __name__ == \"__main__\": os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"forecastsite.settings\") try: from",
"import execute_from_command_line except ImportError: try: import django except ImportError: raise ImportError( \"Nie można",
"if __name__ == \"__main__\": os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"forecastsite.settings\") try: from django.core.management import execute_from_command_line except ImportError:",
"os import sys if __name__ == \"__main__\": os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"forecastsite.settings\") try: from django.core.management import",
"try: import django except ImportError: raise ImportError( \"Nie można zaimportować Django\" ) raise"
] |
[
"number. For example: A -> 1 B -> 2 C -> 3 ...",
"the length of input string s ## Time Complexity: O( n ) #",
"''' Description: Given a column title as appear in an Excel sheet, return",
"27 AB -> 28 ... Example 1: Input: \"A\" Output: 1 Example 2:",
"maintain call stack for recursion, which is of O( n ). def test_bench():",
"'AZ', 'BA','ZY', 'ZZ','AAA'] for s in test_data: n = Solution().titleToNumber(s) print(n) return if",
"Given a column title as appear in an Excel sheet, return its corresponding",
"Time Complexity: O( n ) # # The major overhead in time is",
"str) -> int: if len(s) == 1: # base case return ord(s)-64 else:",
"1 Example 2: Input: \"AB\" Output: 28 Example 3: Input: \"ZY\" Output: 701",
"space is to maintain call stack for recursion, which is of O( n",
"overhead in space is to maintain call stack for recursion, which is of",
"for recursion, which is of O( n ). def test_bench(): test_data = ['A',",
"titleToNumber(self, s: str) -> int: if len(s) == 1: # base case return",
"O( n ) # # The major overhead in space is to maintain",
"3 ... Z -> 26 AA -> 27 AB -> 28 ... Example",
"1: Input: \"A\" Output: 1 Example 2: Input: \"AB\" Output: 28 Example 3:",
"in time is the call depth of recursion, which is of O( n",
"-> 27 AB -> 28 ... Example 1: Input: \"A\" Output: 1 Example",
"s: str) -> int: if len(s) == 1: # base case return ord(s)-64",
"n ) # # The major overhead in space is to maintain call",
"as appear in an Excel sheet, return its corresponding column number. For example:",
"int: if len(s) == 1: # base case return ord(s)-64 else: # general",
"in space is to maintain call stack for recursion, which is of O(",
"28 ... Example 1: Input: \"A\" Output: 1 Example 2: Input: \"AB\" Output:",
"AB -> 28 ... Example 1: Input: \"A\" Output: 1 Example 2: Input:",
"Solution: def titleToNumber(self, s: str) -> int: if len(s) == 1: # base",
"string s ## Time Complexity: O( n ) # # The major overhead",
"column title as appear in an Excel sheet, return its corresponding column number.",
"test_bench(): test_data = ['A', 'AB', 'AZ', 'BA','ZY', 'ZZ','AAA'] for s in test_data: n",
"Example 2: Input: \"AB\" Output: 28 Example 3: Input: \"ZY\" Output: 701 '''",
"which is of O( n ). def test_bench(): test_data = ['A', 'AB', 'AZ',",
"26*self.titleToNumber( s[:-1] ) + self.titleToNumber( s[-1] ) # n : the length of",
"C -> 3 ... Z -> 26 AA -> 27 AB -> 28",
"701 ''' class Solution: def titleToNumber(self, s: str) -> int: if len(s) ==",
"2: Input: \"AB\" Output: 28 Example 3: Input: \"ZY\" Output: 701 ''' class",
"is of O( n ). ## Space Complexity: O( n ) # #",
") # # The major overhead in space is to maintain call stack",
"O( n ) # # The major overhead in time is the call",
"# # The major overhead in space is to maintain call stack for",
"recursion, which is of O( n ). def test_bench(): test_data = ['A', 'AB',",
"# base case return ord(s)-64 else: # general case return 26*self.titleToNumber( s[:-1] )",
"if len(s) == 1: # base case return ord(s)-64 else: # general case",
"return ord(s)-64 else: # general case return 26*self.titleToNumber( s[:-1] ) + self.titleToNumber( s[-1]",
"Space Complexity: O( n ) # # The major overhead in space is",
"The major overhead in time is the call depth of recursion, which is",
"-> 3 ... Z -> 26 AA -> 27 AB -> 28 ...",
"title as appear in an Excel sheet, return its corresponding column number. For",
"# n : the length of input string s ## Time Complexity: O(",
"test_data = ['A', 'AB', 'AZ', 'BA','ZY', 'ZZ','AAA'] for s in test_data: n =",
"# The major overhead in space is to maintain call stack for recursion,",
"-> 26 AA -> 27 AB -> 28 ... Example 1: Input: \"A\"",
"return 26*self.titleToNumber( s[:-1] ) + self.titleToNumber( s[-1] ) # n : the length",
"def test_bench(): test_data = ['A', 'AB', 'AZ', 'BA','ZY', 'ZZ','AAA'] for s in test_data:",
"). ## Space Complexity: O( n ) # # The major overhead in",
"The major overhead in space is to maintain call stack for recursion, which",
"call stack for recursion, which is of O( n ). def test_bench(): test_data",
"a column title as appear in an Excel sheet, return its corresponding column",
"time is the call depth of recursion, which is of O( n ).",
"n ). def test_bench(): test_data = ['A', 'AB', 'AZ', 'BA','ZY', 'ZZ','AAA'] for s",
"class Solution: def titleToNumber(self, s: str) -> int: if len(s) == 1: #",
"n ) # # The major overhead in time is the call depth",
"n ). ## Space Complexity: O( n ) # # The major overhead",
"an Excel sheet, return its corresponding column number. For example: A -> 1",
"len(s) == 1: # base case return ord(s)-64 else: # general case return",
"case return ord(s)-64 else: # general case return 26*self.titleToNumber( s[:-1] ) + self.titleToNumber(",
"\"A\" Output: 1 Example 2: Input: \"AB\" Output: 28 Example 3: Input: \"ZY\"",
"column number. For example: A -> 1 B -> 2 C -> 3",
") # n : the length of input string s ## Time Complexity:",
"ord(s)-64 else: # general case return 26*self.titleToNumber( s[:-1] ) + self.titleToNumber( s[-1] )",
"\"ZY\" Output: 701 ''' class Solution: def titleToNumber(self, s: str) -> int: if",
"## Space Complexity: O( n ) # # The major overhead in space",
"-> int: if len(s) == 1: # base case return ord(s)-64 else: #",
"Input: \"A\" Output: 1 Example 2: Input: \"AB\" Output: 28 Example 3: Input:",
"major overhead in time is the call depth of recursion, which is of",
"of O( n ). def test_bench(): test_data = ['A', 'AB', 'AZ', 'BA','ZY', 'ZZ','AAA']",
"# general case return 26*self.titleToNumber( s[:-1] ) + self.titleToNumber( s[-1] ) # n",
"-> 1 B -> 2 C -> 3 ... Z -> 26 AA",
"s[:-1] ) + self.titleToNumber( s[-1] ) # n : the length of input",
"overhead in time is the call depth of recursion, which is of O(",
"appear in an Excel sheet, return its corresponding column number. For example: A",
") + self.titleToNumber( s[-1] ) # n : the length of input string",
"1: # base case return ord(s)-64 else: # general case return 26*self.titleToNumber( s[:-1]",
"## Time Complexity: O( n ) # # The major overhead in time",
"Complexity: O( n ) # # The major overhead in time is the",
"Output: 28 Example 3: Input: \"ZY\" Output: 701 ''' class Solution: def titleToNumber(self,",
"of recursion, which is of O( n ). ## Space Complexity: O( n",
"Input: \"ZY\" Output: 701 ''' class Solution: def titleToNumber(self, s: str) -> int:",
"Input: \"AB\" Output: 28 Example 3: Input: \"ZY\" Output: 701 ''' class Solution:",
"n : the length of input string s ## Time Complexity: O( n",
"recursion, which is of O( n ). ## Space Complexity: O( n )",
": the length of input string s ## Time Complexity: O( n )",
"general case return 26*self.titleToNumber( s[:-1] ) + self.titleToNumber( s[-1] ) # n :",
"Example 3: Input: \"ZY\" Output: 701 ''' class Solution: def titleToNumber(self, s: str)",
"of O( n ). ## Space Complexity: O( n ) # # The",
"corresponding column number. For example: A -> 1 B -> 2 C ->",
"Description: Given a column title as appear in an Excel sheet, return its",
"# # The major overhead in time is the call depth of recursion,",
"in an Excel sheet, return its corresponding column number. For example: A ->",
"Example 1: Input: \"A\" Output: 1 Example 2: Input: \"AB\" Output: 28 Example",
"is of O( n ). def test_bench(): test_data = ['A', 'AB', 'AZ', 'BA','ZY',",
"input string s ## Time Complexity: O( n ) # # The major",
"... Example 1: Input: \"A\" Output: 1 Example 2: Input: \"AB\" Output: 28",
"def titleToNumber(self, s: str) -> int: if len(s) == 1: # base case",
"Complexity: O( n ) # # The major overhead in space is to",
"1 B -> 2 C -> 3 ... Z -> 26 AA ->",
"AA -> 27 AB -> 28 ... Example 1: Input: \"A\" Output: 1",
"to maintain call stack for recursion, which is of O( n ). def",
") # # The major overhead in time is the call depth of",
"Output: 1 Example 2: Input: \"AB\" Output: 28 Example 3: Input: \"ZY\" Output:",
"'AB', 'AZ', 'BA','ZY', 'ZZ','AAA'] for s in test_data: n = Solution().titleToNumber(s) print(n) return",
"26 AA -> 27 AB -> 28 ... Example 1: Input: \"A\" Output:",
"''' class Solution: def titleToNumber(self, s: str) -> int: if len(s) == 1:",
"its corresponding column number. For example: A -> 1 B -> 2 C",
"B -> 2 C -> 3 ... Z -> 26 AA -> 27",
"self.titleToNumber( s[-1] ) # n : the length of input string s ##",
"s in test_data: n = Solution().titleToNumber(s) print(n) return if __name__ == '__main__': test_bench()",
"call depth of recursion, which is of O( n ). ## Space Complexity:",
"28 Example 3: Input: \"ZY\" Output: 701 ''' class Solution: def titleToNumber(self, s:",
"depth of recursion, which is of O( n ). ## Space Complexity: O(",
"base case return ord(s)-64 else: # general case return 26*self.titleToNumber( s[:-1] ) +",
"-> 28 ... Example 1: Input: \"A\" Output: 1 Example 2: Input: \"AB\"",
"['A', 'AB', 'AZ', 'BA','ZY', 'ZZ','AAA'] for s in test_data: n = Solution().titleToNumber(s) print(n)",
"is to maintain call stack for recursion, which is of O( n ).",
"of input string s ## Time Complexity: O( n ) # # The",
"A -> 1 B -> 2 C -> 3 ... Z -> 26",
"which is of O( n ). ## Space Complexity: O( n ) #",
"sheet, return its corresponding column number. For example: A -> 1 B ->",
"-> 2 C -> 3 ... Z -> 26 AA -> 27 AB",
"stack for recursion, which is of O( n ). def test_bench(): test_data =",
"'BA','ZY', 'ZZ','AAA'] for s in test_data: n = Solution().titleToNumber(s) print(n) return if __name__",
"'ZZ','AAA'] for s in test_data: n = Solution().titleToNumber(s) print(n) return if __name__ ==",
"). def test_bench(): test_data = ['A', 'AB', 'AZ', 'BA','ZY', 'ZZ','AAA'] for s in",
"the call depth of recursion, which is of O( n ). ## Space",
"+ self.titleToNumber( s[-1] ) # n : the length of input string s",
"else: # general case return 26*self.titleToNumber( s[:-1] ) + self.titleToNumber( s[-1] ) #",
"Output: 701 ''' class Solution: def titleToNumber(self, s: str) -> int: if len(s)",
"O( n ). ## Space Complexity: O( n ) # # The major",
"# The major overhead in time is the call depth of recursion, which",
"s ## Time Complexity: O( n ) # # The major overhead in",
"example: A -> 1 B -> 2 C -> 3 ... Z ->",
"return its corresponding column number. For example: A -> 1 B -> 2",
"length of input string s ## Time Complexity: O( n ) # #",
"== 1: # base case return ord(s)-64 else: # general case return 26*self.titleToNumber(",
"= ['A', 'AB', 'AZ', 'BA','ZY', 'ZZ','AAA'] for s in test_data: n = Solution().titleToNumber(s)",
"Excel sheet, return its corresponding column number. For example: A -> 1 B",
"case return 26*self.titleToNumber( s[:-1] ) + self.titleToNumber( s[-1] ) # n : the",
"major overhead in space is to maintain call stack for recursion, which is",
"is the call depth of recursion, which is of O( n ). ##",
"for s in test_data: n = Solution().titleToNumber(s) print(n) return if __name__ == '__main__':",
"\"AB\" Output: 28 Example 3: Input: \"ZY\" Output: 701 ''' class Solution: def",
"Z -> 26 AA -> 27 AB -> 28 ... Example 1: Input:",
"3: Input: \"ZY\" Output: 701 ''' class Solution: def titleToNumber(self, s: str) ->",
"... Z -> 26 AA -> 27 AB -> 28 ... Example 1:",
"O( n ). def test_bench(): test_data = ['A', 'AB', 'AZ', 'BA','ZY', 'ZZ','AAA'] for",
"For example: A -> 1 B -> 2 C -> 3 ... Z",
"2 C -> 3 ... Z -> 26 AA -> 27 AB ->",
"s[-1] ) # n : the length of input string s ## Time"
] |
[
"def forward(self, x, target): \"\"\" x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log P target表示label(M,) \"\"\" assert x.size(1) ==",
"= dim def forward(self, pred, target): pred = pred.log_softmax(dim=self.dim) with torch.no_grad(): true_dist =",
"print(x.shape,true_dist.shape) return self.criterion(x, true_dist) class LabelSmoothingLoss(nn.Module): def __init__(self, classes, smoothing=0.0, dim=-1): super(LabelSmoothingLoss, self).__init__()",
"= 1.0 - smoothing self.smoothing = smoothing self.cls = classes self.dim = dim",
"== self.size x = x.log() true_dist = x.data.clone()#先深复制过来 #print true_dist true_dist.fill_(self.smoothing / (self.size",
"self).__init__() self.confidence = 1.0 - smoothing self.smoothing = smoothing self.cls = classes self.dim",
"super(LabelSmoothingLoss, self).__init__() self.confidence = 1.0 - smoothing self.smoothing = smoothing self.cls = classes",
"1.0 - smoothing self.smoothing = smoothing self.size = size self.true_dist = None def",
"- smoothing self.smoothing = smoothing self.size = size self.true_dist = None def forward(self,",
"smoothing self.size = size self.true_dist = None def forward(self, x, target): \"\"\" x表示输入",
"pred.log_softmax(dim=self.dim) with torch.no_grad(): true_dist = pred.data.clone() true_dist = torch.zeros_like(pred) true_dist.fill_(self.smoothing / (self.cls -",
"nn.KLDivLoss(size_average=False) #self.padding_idx = padding_idx self.confidence = 1.0 - smoothing self.smoothing = smoothing self.size",
"def __init__(self, classes, smoothing=0.0, dim=-1): super(LabelSmoothingLoss, self).__init__() self.confidence = 1.0 - smoothing self.smoothing",
"forward(self, x, target): \"\"\" x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log P target表示label(M,) \"\"\" assert x.size(1) == self.size",
"#print true_dist true_dist.fill_(self.smoothing / (self.size - 1))#otherwise的公式 #print true_dist #变成one-hot编码,1表示按列填充, #target.data.unsqueeze(1)表示索引,confidence表示填充的数字 true_dist.scatter_(1, target.data.unsqueeze(1),",
"= true_dist # print(x.shape,true_dist.shape) return self.criterion(x, true_dist) class LabelSmoothingLoss(nn.Module): def __init__(self, classes, smoothing=0.0,",
"super(LabelSmoothing, self).__init__() self.criterion = nn.KLDivLoss(size_average=False) #self.padding_idx = padding_idx self.confidence = 1.0 - smoothing",
"(M,N)N个样本,M表示总类数,每一个类的概率log P target表示label(M,) \"\"\" assert x.size(1) == self.size x = x.log() true_dist =",
"= smoothing self.cls = classes self.dim = dim def forward(self, pred, target): pred",
"- 1))#otherwise的公式 #print true_dist #变成one-hot编码,1表示按列填充, #target.data.unsqueeze(1)表示索引,confidence表示填充的数字 true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) self.true_dist = true_dist #",
"assert x.size(1) == self.size x = x.log() true_dist = x.data.clone()#先深复制过来 #print true_dist true_dist.fill_(self.smoothing",
"x.log() true_dist = x.data.clone()#先深复制过来 #print true_dist true_dist.fill_(self.smoothing / (self.size - 1))#otherwise的公式 #print true_dist",
"LabelSmoothingLoss(nn.Module): def __init__(self, classes, smoothing=0.0, dim=-1): super(LabelSmoothingLoss, self).__init__() self.confidence = 1.0 - smoothing",
"classes self.dim = dim def forward(self, pred, target): pred = pred.log_softmax(dim=self.dim) with torch.no_grad():",
"pred = pred.log_softmax(dim=self.dim) with torch.no_grad(): true_dist = pred.data.clone() true_dist = torch.zeros_like(pred) true_dist.fill_(self.smoothing /",
"true_dist = torch.zeros_like(pred) true_dist.fill_(self.smoothing / (self.cls - 1)) true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) return torch.mean(torch.sum(-true_dist",
"self.cls = classes self.dim = dim def forward(self, pred, target): pred = pred.log_softmax(dim=self.dim)",
"target.data.unsqueeze(1), self.confidence) self.true_dist = true_dist # print(x.shape,true_dist.shape) return self.criterion(x, true_dist) class LabelSmoothingLoss(nn.Module): def",
"def forward(self, pred, target): pred = pred.log_softmax(dim=self.dim) with torch.no_grad(): true_dist = pred.data.clone() true_dist",
"true_dist.fill_(self.smoothing / (self.cls - 1)) true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))",
"x.size(1) == self.size x = x.log() true_dist = x.data.clone()#先深复制过来 #print true_dist true_dist.fill_(self.smoothing /",
"classes, smoothing=0.0, dim=-1): super(LabelSmoothingLoss, self).__init__() self.confidence = 1.0 - smoothing self.smoothing = smoothing",
"size self.true_dist = None def forward(self, x, target): \"\"\" x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log P target表示label(M,)",
"# print(x.shape,true_dist.shape) return self.criterion(x, true_dist) class LabelSmoothingLoss(nn.Module): def __init__(self, classes, smoothing=0.0, dim=-1): super(LabelSmoothingLoss,",
"class LabelSmoothingLoss(nn.Module): def __init__(self, classes, smoothing=0.0, dim=-1): super(LabelSmoothingLoss, self).__init__() self.confidence = 1.0 -",
"padding_idx self.confidence = 1.0 - smoothing self.smoothing = smoothing self.size = size self.true_dist",
"true_dist = x.data.clone()#先深复制过来 #print true_dist true_dist.fill_(self.smoothing / (self.size - 1))#otherwise的公式 #print true_dist #变成one-hot编码,1表示按列填充,",
"self).__init__() self.criterion = nn.KLDivLoss(size_average=False) #self.padding_idx = padding_idx self.confidence = 1.0 - smoothing self.smoothing",
"self.criterion = nn.KLDivLoss(size_average=False) #self.padding_idx = padding_idx self.confidence = 1.0 - smoothing self.smoothing =",
"= pred.log_softmax(dim=self.dim) with torch.no_grad(): true_dist = pred.data.clone() true_dist = torch.zeros_like(pred) true_dist.fill_(self.smoothing / (self.cls",
"= None def forward(self, x, target): \"\"\" x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log P target表示label(M,) \"\"\" assert",
"self.confidence) self.true_dist = true_dist # print(x.shape,true_dist.shape) return self.criterion(x, true_dist) class LabelSmoothingLoss(nn.Module): def __init__(self,",
"smoothing=0.0): super(LabelSmoothing, self).__init__() self.criterion = nn.KLDivLoss(size_average=False) #self.padding_idx = padding_idx self.confidence = 1.0 -",
"LabelSmoothing(nn.Module): def __init__(self, size, smoothing=0.0): super(LabelSmoothing, self).__init__() self.criterion = nn.KLDivLoss(size_average=False) #self.padding_idx = padding_idx",
"torch.no_grad(): true_dist = pred.data.clone() true_dist = torch.zeros_like(pred) true_dist.fill_(self.smoothing / (self.cls - 1)) true_dist.scatter_(1,",
"smoothing=0.0, dim=-1): super(LabelSmoothingLoss, self).__init__() self.confidence = 1.0 - smoothing self.smoothing = smoothing self.cls",
"self.confidence = 1.0 - smoothing self.smoothing = smoothing self.cls = classes self.dim =",
"= nn.KLDivLoss(size_average=False) #self.padding_idx = padding_idx self.confidence = 1.0 - smoothing self.smoothing = smoothing",
"nn import torch class LabelSmoothing(nn.Module): def __init__(self, size, smoothing=0.0): super(LabelSmoothing, self).__init__() self.criterion =",
"self.smoothing = smoothing self.size = size self.true_dist = None def forward(self, x, target):",
"true_dist.fill_(self.smoothing / (self.size - 1))#otherwise的公式 #print true_dist #变成one-hot编码,1表示按列填充, #target.data.unsqueeze(1)表示索引,confidence表示填充的数字 true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) self.true_dist",
"= classes self.dim = dim def forward(self, pred, target): pred = pred.log_softmax(dim=self.dim) with",
"/ (self.size - 1))#otherwise的公式 #print true_dist #变成one-hot编码,1表示按列填充, #target.data.unsqueeze(1)表示索引,confidence表示填充的数字 true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) self.true_dist =",
"x = x.log() true_dist = x.data.clone()#先深复制过来 #print true_dist true_dist.fill_(self.smoothing / (self.size - 1))#otherwise的公式",
"x.data.clone()#先深复制过来 #print true_dist true_dist.fill_(self.smoothing / (self.size - 1))#otherwise的公式 #print true_dist #变成one-hot编码,1表示按列填充, #target.data.unsqueeze(1)表示索引,confidence表示填充的数字 true_dist.scatter_(1,",
"x, target): \"\"\" x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log P target表示label(M,) \"\"\" assert x.size(1) == self.size x",
"self.true_dist = true_dist # print(x.shape,true_dist.shape) return self.criterion(x, true_dist) class LabelSmoothingLoss(nn.Module): def __init__(self, classes,",
"= x.data.clone()#先深复制过来 #print true_dist true_dist.fill_(self.smoothing / (self.size - 1))#otherwise的公式 #print true_dist #变成one-hot编码,1表示按列填充, #target.data.unsqueeze(1)表示索引,confidence表示填充的数字",
"#变成one-hot编码,1表示按列填充, #target.data.unsqueeze(1)表示索引,confidence表示填充的数字 true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) self.true_dist = true_dist # print(x.shape,true_dist.shape) return self.criterion(x, true_dist)",
"= size self.true_dist = None def forward(self, x, target): \"\"\" x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log P",
"\"\"\" x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log P target表示label(M,) \"\"\" assert x.size(1) == self.size x = x.log()",
"target): \"\"\" x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log P target表示label(M,) \"\"\" assert x.size(1) == self.size x =",
"smoothing self.cls = classes self.dim = dim def forward(self, pred, target): pred =",
"= pred.data.clone() true_dist = torch.zeros_like(pred) true_dist.fill_(self.smoothing / (self.cls - 1)) true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)",
"torch class LabelSmoothing(nn.Module): def __init__(self, size, smoothing=0.0): super(LabelSmoothing, self).__init__() self.criterion = nn.KLDivLoss(size_average=False) #self.padding_idx",
"__init__(self, classes, smoothing=0.0, dim=-1): super(LabelSmoothingLoss, self).__init__() self.confidence = 1.0 - smoothing self.smoothing =",
"target): pred = pred.log_softmax(dim=self.dim) with torch.no_grad(): true_dist = pred.data.clone() true_dist = torch.zeros_like(pred) true_dist.fill_(self.smoothing",
"self.size x = x.log() true_dist = x.data.clone()#先深复制过来 #print true_dist true_dist.fill_(self.smoothing / (self.size -",
"import torch.nn as nn import torch class LabelSmoothing(nn.Module): def __init__(self, size, smoothing=0.0): super(LabelSmoothing,",
"1.0 - smoothing self.smoothing = smoothing self.cls = classes self.dim = dim def",
"#target.data.unsqueeze(1)表示索引,confidence表示填充的数字 true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) self.true_dist = true_dist # print(x.shape,true_dist.shape) return self.criterion(x, true_dist) class",
"true_dist # print(x.shape,true_dist.shape) return self.criterion(x, true_dist) class LabelSmoothingLoss(nn.Module): def __init__(self, classes, smoothing=0.0, dim=-1):",
"torch.nn as nn import torch class LabelSmoothing(nn.Module): def __init__(self, size, smoothing=0.0): super(LabelSmoothing, self).__init__()",
"self.dim = dim def forward(self, pred, target): pred = pred.log_softmax(dim=self.dim) with torch.no_grad(): true_dist",
"1))#otherwise的公式 #print true_dist #变成one-hot编码,1表示按列填充, #target.data.unsqueeze(1)表示索引,confidence表示填充的数字 true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) self.true_dist = true_dist # print(x.shape,true_dist.shape)",
"= padding_idx self.confidence = 1.0 - smoothing self.smoothing = smoothing self.size = size",
"true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) self.true_dist = true_dist # print(x.shape,true_dist.shape) return self.criterion(x, true_dist) class LabelSmoothingLoss(nn.Module):",
"- smoothing self.smoothing = smoothing self.cls = classes self.dim = dim def forward(self,",
"= 1.0 - smoothing self.smoothing = smoothing self.size = size self.true_dist = None",
"pred.data.clone() true_dist = torch.zeros_like(pred) true_dist.fill_(self.smoothing / (self.cls - 1)) true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) return",
"#self.padding_idx = padding_idx self.confidence = 1.0 - smoothing self.smoothing = smoothing self.size =",
"dim def forward(self, pred, target): pred = pred.log_softmax(dim=self.dim) with torch.no_grad(): true_dist = pred.data.clone()",
"self.confidence = 1.0 - smoothing self.smoothing = smoothing self.size = size self.true_dist =",
"dim=-1): super(LabelSmoothingLoss, self).__init__() self.confidence = 1.0 - smoothing self.smoothing = smoothing self.cls =",
"def __init__(self, size, smoothing=0.0): super(LabelSmoothing, self).__init__() self.criterion = nn.KLDivLoss(size_average=False) #self.padding_idx = padding_idx self.confidence",
"target表示label(M,) \"\"\" assert x.size(1) == self.size x = x.log() true_dist = x.data.clone()#先深复制过来 #print",
"\"\"\" assert x.size(1) == self.size x = x.log() true_dist = x.data.clone()#先深复制过来 #print true_dist",
"pred, target): pred = pred.log_softmax(dim=self.dim) with torch.no_grad(): true_dist = pred.data.clone() true_dist = torch.zeros_like(pred)",
"as nn import torch class LabelSmoothing(nn.Module): def __init__(self, size, smoothing=0.0): super(LabelSmoothing, self).__init__() self.criterion",
"class LabelSmoothing(nn.Module): def __init__(self, size, smoothing=0.0): super(LabelSmoothing, self).__init__() self.criterion = nn.KLDivLoss(size_average=False) #self.padding_idx =",
"smoothing self.smoothing = smoothing self.cls = classes self.dim = dim def forward(self, pred,",
"true_dist) class LabelSmoothingLoss(nn.Module): def __init__(self, classes, smoothing=0.0, dim=-1): super(LabelSmoothingLoss, self).__init__() self.confidence = 1.0",
"x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log P target表示label(M,) \"\"\" assert x.size(1) == self.size x = x.log() true_dist",
"= torch.zeros_like(pred) true_dist.fill_(self.smoothing / (self.cls - 1)) true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) return torch.mean(torch.sum(-true_dist *",
"self.smoothing = smoothing self.cls = classes self.dim = dim def forward(self, pred, target):",
"smoothing self.smoothing = smoothing self.size = size self.true_dist = None def forward(self, x,",
"with torch.no_grad(): true_dist = pred.data.clone() true_dist = torch.zeros_like(pred) true_dist.fill_(self.smoothing / (self.cls - 1))",
"import torch class LabelSmoothing(nn.Module): def __init__(self, size, smoothing=0.0): super(LabelSmoothing, self).__init__() self.criterion = nn.KLDivLoss(size_average=False)",
"true_dist = pred.data.clone() true_dist = torch.zeros_like(pred) true_dist.fill_(self.smoothing / (self.cls - 1)) true_dist.scatter_(1, target.data.unsqueeze(1),",
"torch.zeros_like(pred) true_dist.fill_(self.smoothing / (self.cls - 1)) true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) return torch.mean(torch.sum(-true_dist * pred,",
"P target表示label(M,) \"\"\" assert x.size(1) == self.size x = x.log() true_dist = x.data.clone()#先深复制过来",
"None def forward(self, x, target): \"\"\" x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log P target表示label(M,) \"\"\" assert x.size(1)",
"return self.criterion(x, true_dist) class LabelSmoothingLoss(nn.Module): def __init__(self, classes, smoothing=0.0, dim=-1): super(LabelSmoothingLoss, self).__init__() self.confidence",
"(self.size - 1))#otherwise的公式 #print true_dist #变成one-hot编码,1表示按列填充, #target.data.unsqueeze(1)表示索引,confidence表示填充的数字 true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) self.true_dist = true_dist",
"#print true_dist #变成one-hot编码,1表示按列填充, #target.data.unsqueeze(1)表示索引,confidence表示填充的数字 true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) self.true_dist = true_dist # print(x.shape,true_dist.shape) return",
"self.true_dist = None def forward(self, x, target): \"\"\" x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log P target表示label(M,) \"\"\"",
"forward(self, pred, target): pred = pred.log_softmax(dim=self.dim) with torch.no_grad(): true_dist = pred.data.clone() true_dist =",
"self.size = size self.true_dist = None def forward(self, x, target): \"\"\" x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log",
"__init__(self, size, smoothing=0.0): super(LabelSmoothing, self).__init__() self.criterion = nn.KLDivLoss(size_average=False) #self.padding_idx = padding_idx self.confidence =",
"self.criterion(x, true_dist) class LabelSmoothingLoss(nn.Module): def __init__(self, classes, smoothing=0.0, dim=-1): super(LabelSmoothingLoss, self).__init__() self.confidence =",
"= smoothing self.size = size self.true_dist = None def forward(self, x, target): \"\"\"",
"size, smoothing=0.0): super(LabelSmoothing, self).__init__() self.criterion = nn.KLDivLoss(size_average=False) #self.padding_idx = padding_idx self.confidence = 1.0",
"true_dist true_dist.fill_(self.smoothing / (self.size - 1))#otherwise的公式 #print true_dist #变成one-hot编码,1表示按列填充, #target.data.unsqueeze(1)表示索引,confidence表示填充的数字 true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)",
"true_dist #变成one-hot编码,1表示按列填充, #target.data.unsqueeze(1)表示索引,confidence表示填充的数字 true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence) self.true_dist = true_dist # print(x.shape,true_dist.shape) return self.criterion(x,",
"= x.log() true_dist = x.data.clone()#先深复制过来 #print true_dist true_dist.fill_(self.smoothing / (self.size - 1))#otherwise的公式 #print"
] |
[
"import numpy as np import matplotlib.pyplot as plt import gym import time import",
"= env.action_space.n x = Input((1,) + env.observation_space.shape) y = Flatten()(x) y = Dense(16)(y)",
"y = Activation('linear')(y) model = Model(x, y) memory = SequentialMemory(limit=10000, window_length=1) # policy",
"state, reward, done, _ = env.step(2) state_list.append(reward) env.render() env.render(close=True) dqn.test(env, nb_episodes=5, visualize=True) env.render(close=True)",
"- 0.05 return tuple(step) def _reset(self): return self.env.reset() def _seed(self, seed=None): return self.env.seed(seed)",
"_reset(self): return self.env.reset() def _seed(self, seed=None): return self.env.seed(seed) def _render(self, mode='human', close=False): return",
"keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge from keras.optimizers import",
"from keras.optimizers import Adam, RMSprop from keras.callbacks import History from keras import backend",
"rl.agents.dqn import DQNAgent from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy from rl.memory import SequentialMemory, EpisodeParameterMemory",
"memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32, enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy) dqn.compile(Adam(), metrics=['mae']) hist = dqn.fit(env,",
"0), 0))[0]) state, reward, done, _ = env.step(2) state_list.append(reward) env.render() env.render(close=True) dqn.test(env, nb_episodes=5,",
"= MountainCarEnv() env.seed() nb_actions = env.action_space.n x = Input((1,) + env.observation_space.shape) y =",
"import copy from keras.models import Sequential, Model from keras.layers import Dense, Activation, Flatten,",
"copy from keras.models import Sequential, Model from keras.layers import Dense, Activation, Flatten, Lambda,",
"import Env, Space, spaces from gym.utils import seeding from rl.agents.dqn import DQNAgent from",
"return self.env.render(mode, close) def _close(self): return self.env.close() env = MountainCarEnv() env.seed() nb_actions =",
"seed=None): return self.env.seed(seed) def _render(self, mode='human', close=False): return self.env.render(mode, close) def _close(self): return",
"import SARSAAgent from rl.callbacks import TrainEpisodeLogger, CallbackList class MountainCarEnv(Env): metadata = {'render.modes': ['human',",
"import tensorflow as tf from gym import Env, Space, spaces from gym.utils import",
"self.env = gym.make('MountainCar-v0') self.action_space = self.env.action_space self.observation_space = self.env.observation_space def _step(self, action): step",
"None: self.env = gym.make('MountainCar-v0') self.action_space = self.env.action_space self.observation_space = self.env.observation_space def _step(self, action):",
"Input, Reshape, concatenate, Merge from keras.optimizers import Adam, RMSprop from keras.callbacks import History",
"return self.env.close() env = MountainCarEnv() env.seed() nb_actions = env.action_space.n x = Input((1,) +",
"= np.abs(step[0][1]) - 0.05 return tuple(step) def _reset(self): return self.env.reset() def _seed(self, seed=None):",
"close=False): return self.env.render(mode, close) def _close(self): return self.env.close() env = MountainCarEnv() env.seed() nb_actions",
"= Dense(16)(y) y = Activation('relu')(y) y = Dense(nb_actions)(y) y = Activation('linear')(y) model =",
"EpsGreedyQPolicy from rl.memory import SequentialMemory, EpisodeParameterMemory from rl.agents.cem import CEMAgent from rl.agents import",
"BoltzmannQPolicy, EpsGreedyQPolicy from rl.memory import SequentialMemory, EpisodeParameterMemory from rl.agents.cem import CEMAgent from rl.agents",
"self.observation_space = self.env.observation_space def _step(self, action): step = self.env.step(action) step = list(step) step[1]",
"= BoltzmannQPolicy() policy = EpsGreedyQPolicy() dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32,",
"BoltzmannQPolicy() policy = EpsGreedyQPolicy() dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32, enable_dueling_network=False,",
"from rl.memory import SequentialMemory, EpisodeParameterMemory from rl.agents.cem import CEMAgent from rl.agents import SARSAAgent",
"return self.env.seed(seed) def _render(self, mode='human', close=False): return self.env.render(mode, close) def _close(self): return self.env.close()",
"Env, Space, spaces from gym.utils import seeding from rl.agents.dqn import DQNAgent from rl.policy",
"y = Flatten()(x) y = Dense(16)(y) y = Activation('relu')(y) y = Dense(16)(y) y",
"import gym import time import copy from keras.models import Sequential, Model from keras.layers",
"_close(self): return self.env.close() env = MountainCarEnv() env.seed() nb_actions = env.action_space.n x = Input((1,)",
"metadata = {'render.modes': ['human', 'rgb_array']} def __init__(self) -> None: self.env = gym.make('MountainCar-v0') self.action_space",
"= Activation('relu')(y) y = Dense(16)(y) y = Activation('relu')(y) y = Dense(16)(y) y =",
"= self.env.action_space self.observation_space = self.env.observation_space def _step(self, action): step = self.env.step(action) step =",
"env.seed() nb_actions = env.action_space.n x = Input((1,) + env.observation_space.shape) y = Flatten()(x) y",
"from keras.callbacks import History from keras import backend as K import tensorflow as",
"from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge from keras.optimizers",
"History from keras import backend as K import tensorflow as tf from gym",
"self.env.close() env = MountainCarEnv() env.seed() nb_actions = env.action_space.n x = Input((1,) + env.observation_space.shape)",
"= Dense(nb_actions)(y) y = Activation('linear')(y) model = Model(x, y) memory = SequentialMemory(limit=10000, window_length=1)",
"= Model(x, y) memory = SequentialMemory(limit=10000, window_length=1) # policy = BoltzmannQPolicy() policy =",
"DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32, enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy) dqn.compile(Adam(), metrics=['mae']) hist",
"hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None) state = env.reset() action = env.action_space.sample()",
"keras import backend as K import tensorflow as tf from gym import Env,",
"_render(self, mode='human', close=False): return self.env.render(mode, close) def _close(self): return self.env.close() env = MountainCarEnv()",
"= dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None) state = env.reset() action = env.action_space.sample() print(action)",
"step = list(step) step[1] = np.abs(step[0][1]) - 0.05 return tuple(step) def _reset(self): return",
"_step(self, action): step = self.env.step(action) step = list(step) step[1] = np.abs(step[0][1]) - 0.05",
"MountainCarEnv(Env): metadata = {'render.modes': ['human', 'rgb_array']} def __init__(self) -> None: self.env = gym.make('MountainCar-v0')",
"= DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32, enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy) dqn.compile(Adam(), metrics=['mae'])",
"self.action_space = self.env.action_space self.observation_space = self.env.observation_space def _step(self, action): step = self.env.step(action) step",
"y = Dense(16)(y) y = Activation('relu')(y) y = Dense(16)(y) y = Activation('relu')(y) y",
"from gym import Env, Space, spaces from gym.utils import seeding from rl.agents.dqn import",
"env = MountainCarEnv() env.seed() nb_actions = env.action_space.n x = Input((1,) + env.observation_space.shape) y",
"step = self.env.step(action) step = list(step) step[1] = np.abs(step[0][1]) - 0.05 return tuple(step)",
"gamma=.9, batch_size=32, enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy) dqn.compile(Adam(), metrics=['mae']) hist = dqn.fit(env, nb_steps=10000, visualize=False,",
"state_list= [] for i in range(500): action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0]) state, reward,",
"self.env.render(mode, close) def _close(self): return self.env.close() env = MountainCarEnv() env.seed() nb_actions = env.action_space.n",
"action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0]) state, reward, done, _ = env.step(2) state_list.append(reward) env.render()",
"-> None: self.env = gym.make('MountainCar-v0') self.action_space = self.env.action_space self.observation_space = self.env.observation_space def _step(self,",
"def _seed(self, seed=None): return self.env.seed(seed) def _render(self, mode='human', close=False): return self.env.render(mode, close) def",
"verbose=2, callbacks=None) state = env.reset() action = env.action_space.sample() print(action) state_list= [] for i",
"Flatten()(x) y = Dense(16)(y) y = Activation('relu')(y) y = Dense(16)(y) y = Activation('relu')(y)",
"np import matplotlib.pyplot as plt import gym import time import copy from keras.models",
"nb_actions = env.action_space.n x = Input((1,) + env.observation_space.shape) y = Flatten()(x) y =",
"= self.env.observation_space def _step(self, action): step = self.env.step(action) step = list(step) step[1] =",
"import CEMAgent from rl.agents import SARSAAgent from rl.callbacks import TrainEpisodeLogger, CallbackList class MountainCarEnv(Env):",
"Activation('relu')(y) y = Dense(16)(y) y = Activation('relu')(y) y = Dense(16)(y) y = Activation('relu')(y)",
"<filename>studzie/keras_gym/mountain_car_v0.py import numpy as np import matplotlib.pyplot as plt import gym import time",
"policy = EpsGreedyQPolicy() dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32, enable_dueling_network=False, dueling_type='avg',",
"import TrainEpisodeLogger, CallbackList class MountainCarEnv(Env): metadata = {'render.modes': ['human', 'rgb_array']} def __init__(self) ->",
"= Activation('relu')(y) y = Dense(16)(y) y = Activation('relu')(y) y = Dense(nb_actions)(y) y =",
"batch_size=32, enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy) dqn.compile(Adam(), metrics=['mae']) hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2,",
"enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy) dqn.compile(Adam(), metrics=['mae']) hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None)",
"from gym.utils import seeding from rl.agents.dqn import DQNAgent from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy",
"range(500): action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0]) state, reward, done, _ = env.step(2) state_list.append(reward)",
"target_model_update=.1, policy=policy) dqn.compile(Adam(), metrics=['mae']) hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None) state =",
"spaces from gym.utils import seeding from rl.agents.dqn import DQNAgent from rl.policy import BoltzmannQPolicy,",
"self.env.observation_space def _step(self, action): step = self.env.step(action) step = list(step) step[1] = np.abs(step[0][1])",
"metrics=['mae']) hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None) state = env.reset() action =",
"matplotlib.pyplot as plt import gym import time import copy from keras.models import Sequential,",
"CallbackList class MountainCarEnv(Env): metadata = {'render.modes': ['human', 'rgb_array']} def __init__(self) -> None: self.env",
"i in range(500): action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0]) state, reward, done, _ =",
"rl.agents.cem import CEMAgent from rl.agents import SARSAAgent from rl.callbacks import TrainEpisodeLogger, CallbackList class",
"visualize=False, verbose=2, callbacks=None) state = env.reset() action = env.action_space.sample() print(action) state_list= [] for",
"gym import Env, Space, spaces from gym.utils import seeding from rl.agents.dqn import DQNAgent",
"in range(500): action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0]) state, reward, done, _ = env.step(2)",
"plt import gym import time import copy from keras.models import Sequential, Model from",
"self.env.reset() def _seed(self, seed=None): return self.env.seed(seed) def _render(self, mode='human', close=False): return self.env.render(mode, close)",
"CEMAgent from rl.agents import SARSAAgent from rl.callbacks import TrainEpisodeLogger, CallbackList class MountainCarEnv(Env): metadata",
"state = env.reset() action = env.action_space.sample() print(action) state_list= [] for i in range(500):",
"memory = SequentialMemory(limit=10000, window_length=1) # policy = BoltzmannQPolicy() policy = EpsGreedyQPolicy() dqn =",
"from keras import backend as K import tensorflow as tf from gym import",
"# policy = BoltzmannQPolicy() policy = EpsGreedyQPolicy() dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000,",
"Flatten, Lambda, Input, Reshape, concatenate, Merge from keras.optimizers import Adam, RMSprop from keras.callbacks",
"np.abs(step[0][1]) - 0.05 return tuple(step) def _reset(self): return self.env.reset() def _seed(self, seed=None): return",
"Adam, RMSprop from keras.callbacks import History from keras import backend as K import",
"DQNAgent from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy from rl.memory import SequentialMemory, EpisodeParameterMemory from rl.agents.cem",
"Dense(16)(y) y = Activation('relu')(y) y = Dense(16)(y) y = Activation('relu')(y) y = Dense(16)(y)",
"from rl.agents.dqn import DQNAgent from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy from rl.memory import SequentialMemory,",
"['human', 'rgb_array']} def __init__(self) -> None: self.env = gym.make('MountainCar-v0') self.action_space = self.env.action_space self.observation_space",
"Merge from keras.optimizers import Adam, RMSprop from keras.callbacks import History from keras import",
"import time import copy from keras.models import Sequential, Model from keras.layers import Dense,",
"gym.make('MountainCar-v0') self.action_space = self.env.action_space self.observation_space = self.env.observation_space def _step(self, action): step = self.env.step(action)",
"as plt import gym import time import copy from keras.models import Sequential, Model",
"def _render(self, mode='human', close=False): return self.env.render(mode, close) def _close(self): return self.env.close() env =",
"keras.callbacks import History from keras import backend as K import tensorflow as tf",
"policy=policy) dqn.compile(Adam(), metrics=['mae']) hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None) state = env.reset()",
"rl.callbacks import TrainEpisodeLogger, CallbackList class MountainCarEnv(Env): metadata = {'render.modes': ['human', 'rgb_array']} def __init__(self)",
"import Sequential, Model from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate,",
"Lambda, Input, Reshape, concatenate, Merge from keras.optimizers import Adam, RMSprop from keras.callbacks import",
"policy = BoltzmannQPolicy() policy = EpsGreedyQPolicy() dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9,",
"y = Activation('relu')(y) y = Dense(nb_actions)(y) y = Activation('linear')(y) model = Model(x, y)",
"env.action_space.sample() print(action) state_list= [] for i in range(500): action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0])",
"step[1] = np.abs(step[0][1]) - 0.05 return tuple(step) def _reset(self): return self.env.reset() def _seed(self,",
"import matplotlib.pyplot as plt import gym import time import copy from keras.models import",
"rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy from rl.memory import SequentialMemory, EpisodeParameterMemory from rl.agents.cem import CEMAgent",
"def _close(self): return self.env.close() env = MountainCarEnv() env.seed() nb_actions = env.action_space.n x =",
"from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy from rl.memory import SequentialMemory, EpisodeParameterMemory from rl.agents.cem import",
"time import copy from keras.models import Sequential, Model from keras.layers import Dense, Activation,",
"concatenate, Merge from keras.optimizers import Adam, RMSprop from keras.callbacks import History from keras",
"env.action_space.n x = Input((1,) + env.observation_space.shape) y = Flatten()(x) y = Dense(16)(y) y",
"y = Activation('relu')(y) y = Dense(16)(y) y = Activation('relu')(y) y = Dense(16)(y) y",
"import SequentialMemory, EpisodeParameterMemory from rl.agents.cem import CEMAgent from rl.agents import SARSAAgent from rl.callbacks",
"= env.action_space.sample() print(action) state_list= [] for i in range(500): action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0),",
"mode='human', close=False): return self.env.render(mode, close) def _close(self): return self.env.close() env = MountainCarEnv() env.seed()",
"as K import tensorflow as tf from gym import Env, Space, spaces from",
"= Dense(16)(y) y = Activation('relu')(y) y = Dense(16)(y) y = Activation('relu')(y) y =",
"x = Input((1,) + env.observation_space.shape) y = Flatten()(x) y = Dense(16)(y) y =",
"gym import time import copy from keras.models import Sequential, Model from keras.layers import",
"window_length=1) # policy = BoltzmannQPolicy() policy = EpsGreedyQPolicy() dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory,",
"list(step) step[1] = np.abs(step[0][1]) - 0.05 return tuple(step) def _reset(self): return self.env.reset() def",
"y = Dense(16)(y) y = Activation('relu')(y) y = Dense(nb_actions)(y) y = Activation('linear')(y) model",
"dueling_type='avg', target_model_update=.1, policy=policy) dqn.compile(Adam(), metrics=['mae']) hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None) state",
"keras.optimizers import Adam, RMSprop from keras.callbacks import History from keras import backend as",
"= self.env.step(action) step = list(step) step[1] = np.abs(step[0][1]) - 0.05 return tuple(step) def",
"from keras.models import Sequential, Model from keras.layers import Dense, Activation, Flatten, Lambda, Input,",
"dqn.compile(Adam(), metrics=['mae']) hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None) state = env.reset() action",
"nb_steps_warmup=1000, gamma=.9, batch_size=32, enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy) dqn.compile(Adam(), metrics=['mae']) hist = dqn.fit(env, nb_steps=10000,",
"def __init__(self) -> None: self.env = gym.make('MountainCar-v0') self.action_space = self.env.action_space self.observation_space = self.env.observation_space",
"Sequential, Model from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge",
"env.reset() action = env.action_space.sample() print(action) state_list= [] for i in range(500): action =",
"Model from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge from",
"import DQNAgent from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy from rl.memory import SequentialMemory, EpisodeParameterMemory from",
"__init__(self) -> None: self.env = gym.make('MountainCar-v0') self.action_space = self.env.action_space self.observation_space = self.env.observation_space def",
"nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32, enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy) dqn.compile(Adam(), metrics=['mae']) hist =",
"import History from keras import backend as K import tensorflow as tf from",
"action = env.action_space.sample() print(action) state_list= [] for i in range(500): action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state,",
"EpisodeParameterMemory from rl.agents.cem import CEMAgent from rl.agents import SARSAAgent from rl.callbacks import TrainEpisodeLogger,",
"Input((1,) + env.observation_space.shape) y = Flatten()(x) y = Dense(16)(y) y = Activation('relu')(y) y",
"Dense(16)(y) y = Activation('relu')(y) y = Dense(nb_actions)(y) y = Activation('linear')(y) model = Model(x,",
"Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge from keras.optimizers import Adam, RMSprop from",
"Activation('linear')(y) model = Model(x, y) memory = SequentialMemory(limit=10000, window_length=1) # policy = BoltzmannQPolicy()",
"dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None) state = env.reset() action = env.action_space.sample() print(action) state_list=",
"np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0]) state, reward, done, _ = env.step(2) state_list.append(reward) env.render() env.render(close=True) dqn.test(env,",
"keras.models import Sequential, Model from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape,",
"Model(x, y) memory = SequentialMemory(limit=10000, window_length=1) # policy = BoltzmannQPolicy() policy = EpsGreedyQPolicy()",
"y) memory = SequentialMemory(limit=10000, window_length=1) # policy = BoltzmannQPolicy() policy = EpsGreedyQPolicy() dqn",
"backend as K import tensorflow as tf from gym import Env, Space, spaces",
"SequentialMemory, EpisodeParameterMemory from rl.agents.cem import CEMAgent from rl.agents import SARSAAgent from rl.callbacks import",
"as tf from gym import Env, Space, spaces from gym.utils import seeding from",
"self.env.step(action) step = list(step) step[1] = np.abs(step[0][1]) - 0.05 return tuple(step) def _reset(self):",
"0.05 return tuple(step) def _reset(self): return self.env.reset() def _seed(self, seed=None): return self.env.seed(seed) def",
"TrainEpisodeLogger, CallbackList class MountainCarEnv(Env): metadata = {'render.modes': ['human', 'rgb_array']} def __init__(self) -> None:",
"as np import matplotlib.pyplot as plt import gym import time import copy from",
"callbacks=None) state = env.reset() action = env.action_space.sample() print(action) state_list= [] for i in",
"Activation('relu')(y) y = Dense(16)(y) y = Activation('relu')(y) y = Dense(nb_actions)(y) y = Activation('linear')(y)",
"SARSAAgent from rl.callbacks import TrainEpisodeLogger, CallbackList class MountainCarEnv(Env): metadata = {'render.modes': ['human', 'rgb_array']}",
"'rgb_array']} def __init__(self) -> None: self.env = gym.make('MountainCar-v0') self.action_space = self.env.action_space self.observation_space =",
"Activation('relu')(y) y = Dense(nb_actions)(y) y = Activation('linear')(y) model = Model(x, y) memory =",
"numpy as np import matplotlib.pyplot as plt import gym import time import copy",
"EpsGreedyQPolicy() dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32, enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy)",
"class MountainCarEnv(Env): metadata = {'render.modes': ['human', 'rgb_array']} def __init__(self) -> None: self.env =",
"return self.env.reset() def _seed(self, seed=None): return self.env.seed(seed) def _render(self, mode='human', close=False): return self.env.render(mode,",
"import seeding from rl.agents.dqn import DQNAgent from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy from rl.memory",
"Dense(16)(y) y = Activation('relu')(y) y = Dense(16)(y) y = Activation('relu')(y) y = Dense(nb_actions)(y)",
"= {'render.modes': ['human', 'rgb_array']} def __init__(self) -> None: self.env = gym.make('MountainCar-v0') self.action_space =",
"RMSprop from keras.callbacks import History from keras import backend as K import tensorflow",
"return tuple(step) def _reset(self): return self.env.reset() def _seed(self, seed=None): return self.env.seed(seed) def _render(self,",
"nb_steps=10000, visualize=False, verbose=2, callbacks=None) state = env.reset() action = env.action_space.sample() print(action) state_list= []",
"from rl.agents.cem import CEMAgent from rl.agents import SARSAAgent from rl.callbacks import TrainEpisodeLogger, CallbackList",
"from rl.agents import SARSAAgent from rl.callbacks import TrainEpisodeLogger, CallbackList class MountainCarEnv(Env): metadata =",
"= env.reset() action = env.action_space.sample() print(action) state_list= [] for i in range(500): action",
"= EpsGreedyQPolicy() dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32, enable_dueling_network=False, dueling_type='avg', target_model_update=.1,",
"self.env.seed(seed) def _render(self, mode='human', close=False): return self.env.render(mode, close) def _close(self): return self.env.close() env",
"SequentialMemory(limit=10000, window_length=1) # policy = BoltzmannQPolicy() policy = EpsGreedyQPolicy() dqn = DQNAgent(model=model, nb_actions=nb_actions,",
"K import tensorflow as tf from gym import Env, Space, spaces from gym.utils",
"close) def _close(self): return self.env.close() env = MountainCarEnv() env.seed() nb_actions = env.action_space.n x",
"gym.utils import seeding from rl.agents.dqn import DQNAgent from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy from",
"_seed(self, seed=None): return self.env.seed(seed) def _render(self, mode='human', close=False): return self.env.render(mode, close) def _close(self):",
"def _reset(self): return self.env.reset() def _seed(self, seed=None): return self.env.seed(seed) def _render(self, mode='human', close=False):",
"env.observation_space.shape) y = Flatten()(x) y = Dense(16)(y) y = Activation('relu')(y) y = Dense(16)(y)",
"Space, spaces from gym.utils import seeding from rl.agents.dqn import DQNAgent from rl.policy import",
"import Adam, RMSprop from keras.callbacks import History from keras import backend as K",
"= gym.make('MountainCar-v0') self.action_space = self.env.action_space self.observation_space = self.env.observation_space def _step(self, action): step =",
"y = Activation('relu')(y) y = Dense(16)(y) y = Activation('relu')(y) y = Dense(nb_actions)(y) y",
"[] for i in range(500): action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0]) state, reward, done,",
"import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge from keras.optimizers import Adam,",
"rl.agents import SARSAAgent from rl.callbacks import TrainEpisodeLogger, CallbackList class MountainCarEnv(Env): metadata = {'render.modes':",
"model = Model(x, y) memory = SequentialMemory(limit=10000, window_length=1) # policy = BoltzmannQPolicy() policy",
"seeding from rl.agents.dqn import DQNAgent from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy from rl.memory import",
"from rl.callbacks import TrainEpisodeLogger, CallbackList class MountainCarEnv(Env): metadata = {'render.modes': ['human', 'rgb_array']} def",
"import backend as K import tensorflow as tf from gym import Env, Space,",
"+ env.observation_space.shape) y = Flatten()(x) y = Dense(16)(y) y = Activation('relu')(y) y =",
"MountainCarEnv() env.seed() nb_actions = env.action_space.n x = Input((1,) + env.observation_space.shape) y = Flatten()(x)",
"= Flatten()(x) y = Dense(16)(y) y = Activation('relu')(y) y = Dense(16)(y) y =",
"= Activation('linear')(y) model = Model(x, y) memory = SequentialMemory(limit=10000, window_length=1) # policy =",
"def _step(self, action): step = self.env.step(action) step = list(step) step[1] = np.abs(step[0][1]) -",
"self.env.action_space self.observation_space = self.env.observation_space def _step(self, action): step = self.env.step(action) step = list(step)",
"rl.memory import SequentialMemory, EpisodeParameterMemory from rl.agents.cem import CEMAgent from rl.agents import SARSAAgent from",
"= SequentialMemory(limit=10000, window_length=1) # policy = BoltzmannQPolicy() policy = EpsGreedyQPolicy() dqn = DQNAgent(model=model,",
"= np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0]) state, reward, done, _ = env.step(2) state_list.append(reward) env.render() env.render(close=True)",
"{'render.modes': ['human', 'rgb_array']} def __init__(self) -> None: self.env = gym.make('MountainCar-v0') self.action_space = self.env.action_space",
"= Activation('relu')(y) y = Dense(nb_actions)(y) y = Activation('linear')(y) model = Model(x, y) memory",
"tuple(step) def _reset(self): return self.env.reset() def _seed(self, seed=None): return self.env.seed(seed) def _render(self, mode='human',",
"Reshape, concatenate, Merge from keras.optimizers import Adam, RMSprop from keras.callbacks import History from",
"import BoltzmannQPolicy, EpsGreedyQPolicy from rl.memory import SequentialMemory, EpisodeParameterMemory from rl.agents.cem import CEMAgent from",
"dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32, enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy) dqn.compile(Adam(),",
"Dense(nb_actions)(y) y = Activation('linear')(y) model = Model(x, y) memory = SequentialMemory(limit=10000, window_length=1) #",
"y = Dense(nb_actions)(y) y = Activation('linear')(y) model = Model(x, y) memory = SequentialMemory(limit=10000,",
"tf from gym import Env, Space, spaces from gym.utils import seeding from rl.agents.dqn",
"action): step = self.env.step(action) step = list(step) step[1] = np.abs(step[0][1]) - 0.05 return",
"= Input((1,) + env.observation_space.shape) y = Flatten()(x) y = Dense(16)(y) y = Activation('relu')(y)",
"tensorflow as tf from gym import Env, Space, spaces from gym.utils import seeding",
"= list(step) step[1] = np.abs(step[0][1]) - 0.05 return tuple(step) def _reset(self): return self.env.reset()",
"for i in range(500): action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0]) state, reward, done, _",
"Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge from keras.optimizers import Adam, RMSprop",
"print(action) state_list= [] for i in range(500): action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0]) state,",
"0))[0]) state, reward, done, _ = env.step(2) state_list.append(reward) env.render() env.render(close=True) dqn.test(env, nb_episodes=5, visualize=True)"
] |
[
"self.juncinfo = self.get_juncinfo() def get_juncinfo(self): \"\"\" get junction info \"\"\" dct = dict()",
"def get_junction_of_species_by_partiallocation(species, partiallocation): \"\"\" species (e.g., dyak) location is partial location (e.g., 3L_17475)",
"in dyak dct_as1 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17474844_+\") # alternative splicing junction (long) of tra",
"self.species = species self.sex = sex self.tissue = tissue # self.filename = self.species",
"focalgene.FocalGene(species, geneid, \"M\") for sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: jun =",
"get_junction_of_species_by_partiallocation(species, partiallocation): \"\"\" species (e.g., dyak) location is partial location (e.g., 3L_17475) output",
"FocalJunction(species, sex, tissue) if location in jun.juncinfo: dct[species + \"_\" + sex +",
"mdct.keys(): mdct[sst] = dict() for range in d[sst].keys(): mdct[sst][range] = d[sst][range] return mdct",
"dct_cs = get_junction_of_species_by_location(\"dyak\", \"3L:17475357-17475426_+\") # alternative splicing junction (short) of tra in dyak",
"in self.lines: # (juncid, dinucleotide, intron_size, annostatus, gmcode, regcode, geneassign, cov, lirt, rirt,",
"coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") for sex in",
"dyak dct_as2 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17475015_+\") mdct = merge_dcts([dct_cs, dct_as1, dct_as2]) mpd = pd.DataFrame.from_dict(mdct)",
"sex, tissue): self.species = species self.sex = sex self.tissue = tissue # self.filename",
"+ \"_\" + tissue, partiallocation, range, jun.juncinfo[range]) def get_junction_of_species_by_location(species, location): \"\"\" species (e.g.,",
"\"_\" + tissue][location] = jun.juncinfo[location] else: dct[species + \"_\" + sex + \"_\"",
"= int(bs2) juncstart = chromStart + bs1 + 1 juncend = chromEnd -",
"range in jun.juncinfo.keys(): if range.startswith(partiallocation): print(species + \"_\" + sex + \"_\" +",
"sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: dct[species + \"_\" + sex + \"_\" +",
"\"\"\" get junction info \"\"\" dct = dict() for line in self.lines: #",
"(e.g., 3L_17475) output is all possible junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species]",
"whole range (e.g., 3L_17475357_17475426_+) output is a dict of junctions with coverage \"\"\"",
"partial location (e.g., 3L_17475) output is all possible junctions with coverage \"\"\" geneid",
"not sst in mdct.keys(): mdct[sst] = dict() for range in d[sst].keys(): mdct[sst][range] =",
"dyak dct_cs = get_junction_of_species_by_location(\"dyak\", \"3L:17475357-17475426_+\") # alternative splicing junction (short) of tra in",
"def get_juncinfo(self): \"\"\" get junction info \"\"\" dct = dict() for line in",
"line in self.lines: # (juncid, dinucleotide, intron_size, annostatus, gmcode, regcode, geneassign, cov, lirt,",
"import sharedinfo import pandas as pd from sharedinfo import exist_file, get_lines def get_junction_of_species_by_partiallocation(species,",
"gmcode, regcode, geneassign, cov, lirt, rirt, irt, dncov, ancov, numsamps) = line.rstrip().split(\"\\t\") #",
"tra in dyak dct_as1 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17474844_+\") # alternative splicing junction (long) of",
"\"\"\" species (e.g., dyak) location is partial location (e.g., 3L_17475) output is all",
"= focalgene.FocalGene(species, geneid, \"M\") dct = dict() for sex in sharedinfo.ordered_sex: for tissue",
"sst in d.keys(): # species_sex_tissue if not sst in mdct.keys(): mdct[sst] = dict()",
"+ tissue, partiallocation, range, jun.juncinfo[range]) def get_junction_of_species_by_location(species, location): \"\"\" species (e.g., dyak) location",
"dct_as1 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17474844_+\") # alternative splicing junction (long) of tra in dyak",
"sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") for sex in sharedinfo.ordered_sex: for tissue in",
"sex + \"_\" + tissue][location] = 0 return dct def merge_dcts(dcts): mdct =",
"dct[species + \"_\" + sex + \"_\" + tissue][location] = jun.juncinfo[location] else: dct[species",
"+ sex + \"_\" + tissue] = dict() jun = FocalJunction(species, sex, tissue)",
"thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\") chromStart = int(chromStart) chromEnd =",
"geneid, \"M\") for sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: jun = FocalJunction(species,",
"sex + \"_\" + tissue] = dict() jun = FocalJunction(species, sex, tissue) if",
"<reponame>haiwangyang/PGMF<gh_stars>0 #!/usr/bin/env python \"\"\" Purpose: Handling normalized read count of genes \"\"\" import",
"as pd from sharedinfo import exist_file, get_lines def get_junction_of_species_by_partiallocation(species, partiallocation): \"\"\" species (e.g.,",
"output is a dict of junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen",
"return dct def merge_dcts(dcts): mdct = dict() for d in dcts: for sst",
"mdct class FocalJunction: \"\"\"FocalJunction object\"\"\" def __init__(self, species, sex, tissue): self.species = species",
"jun.juncinfo: dct[species + \"_\" + sex + \"_\" + tissue][location] = jun.juncinfo[location] else:",
"3L_17475) output is all possible junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen",
"geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") dct = dict() for sex",
"# (juncid, dinucleotide, intron_size, annostatus, gmcode, regcode, geneassign, cov, lirt, rirt, irt, dncov,",
"# alternative splicing junction (short) of tra in dyak dct_as1 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17474844_+\")",
"if range.startswith(partiallocation): print(species + \"_\" + sex + \"_\" + tissue, partiallocation, range,",
"\"\"\"FocalJunction object\"\"\" def __init__(self, species, sex, tissue): self.species = species self.sex = sex",
"jun.juncinfo[range]) def get_junction_of_species_by_location(species, location): \"\"\" species (e.g., dyak) location is whole range (e.g.,",
"dict() for sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: dct[species + \"_\" +",
"jun.juncinfo.keys(): if range.startswith(partiallocation): print(species + \"_\" + sex + \"_\" + tissue, partiallocation,",
"= chromStart + bs1 + 1 juncend = chromEnd - bs2 dct[chrom +",
"self.species + \"_\" + sex + \"_\" + tissue + \".merged.juncs\" # spanki",
"= int(chromStart) chromEnd = int(chromEnd) bs1, bs2 = blockSizes.split(\",\") bs1 = int(bs1) bs2",
"= self.species + \"_\" + sex + \"_\" + tissue + \".merged.juncs\" #",
"def get_junction_of_species_by_location(species, location): \"\"\" species (e.g., dyak) location is whole range (e.g., 3L_17475357_17475426_+)",
"+ \"_\" + sex + \"_\" + tissue][location] = 0 return dct def",
"(e.g., dyak) location is whole range (e.g., 3L_17475357_17475426_+) output is a dict of",
"in jun.juncinfo: dct[species + \"_\" + sex + \"_\" + tissue][location] = jun.juncinfo[location]",
"bs2 dct[chrom + \":\" + str(juncstart) + \"-\" + str(juncend) + \"_\" +",
"is all possible junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species,",
"#!/usr/bin/env python \"\"\" Purpose: Handling normalized read count of genes \"\"\" import focalannotation",
"pandas as pd from sharedinfo import exist_file, get_lines def get_junction_of_species_by_partiallocation(species, partiallocation): \"\"\" species",
"range (e.g., 3L_17475357_17475426_+) output is a dict of junctions with coverage \"\"\" geneid",
"all possible junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid,",
"+ bs1 + 1 juncend = chromEnd - bs2 dct[chrom + \":\" +",
"int(chromStart) chromEnd = int(chromEnd) bs1, bs2 = blockSizes.split(\",\") bs1 = int(bs1) bs2 =",
"\"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") dct = dict() for",
"bs2 = blockSizes.split(\",\") bs1 = int(bs1) bs2 = int(bs2) juncstart = chromStart +",
"sharedinfo.ordered_tissue7: dct[species + \"_\" + sex + \"_\" + tissue] = dict() jun",
"= self.species + \"_\" + sex + \"_\" + tissue + \".sorted.junc.bed\" self.lines",
"irt, dncov, ancov, numsamps) = line.rstrip().split(\"\\t\") # spanki junc (chrom, chromStart, chromEnd, name,",
"score return dct if __name__ == '__main__': # consensus splicing junction of tra",
"species (e.g., dyak) location is whole range (e.g., 3L_17475357_17475426_+) output is a dict",
"self.tissue = tissue # self.filename = self.species + \"_\" + sex + \"_\"",
"dict() for d in dcts: for sst in d.keys(): # species_sex_tissue if not",
"ancov, numsamps) = line.rstrip().split(\"\\t\") # spanki junc (chrom, chromStart, chromEnd, name, score, strand,",
"species self.sex = sex self.tissue = tissue # self.filename = self.species + \"_\"",
"= FocalJunction(species, sex, tissue) if location in jun.juncinfo: dct[species + \"_\" + sex",
"= get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17475015_+\") mdct = merge_dcts([dct_cs, dct_as1, dct_as2]) mpd = pd.DataFrame.from_dict(mdct) mpd.to_csv(\"../data/output/dyak.tra.junc.summary.txt\", sep=\"\\t\")",
"+ \"_\" + tissue] = dict() jun = FocalJunction(species, sex, tissue) if location",
"dict of junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid,",
"tissue][location] = 0 return dct def merge_dcts(dcts): mdct = dict() for d in",
"+ str(juncstart) + \"-\" + str(juncend) + \"_\" + strand] = score return",
"dct def merge_dcts(dcts): mdct = dict() for d in dcts: for sst in",
"\"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") for sex in sharedinfo.ordered_sex:",
"(chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) =",
"for sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: dct[species + \"_\" + sex",
"int(chromEnd) bs1, bs2 = blockSizes.split(\",\") bs1 = int(bs1) bs2 = int(bs2) juncstart =",
"gen = focalgene.FocalGene(species, geneid, \"M\") dct = dict() for sex in sharedinfo.ordered_sex: for",
"= line.rstrip().split(\"\\t\") # spanki junc (chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd,",
"dct[species + \"_\" + sex + \"_\" + tissue] = dict() jun =",
"__init__(self, species, sex, tissue): self.species = species self.sex = sex self.tissue = tissue",
"= get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17474844_+\") # alternative splicing junction (long) of tra in dyak dct_as2",
"tissue in sharedinfo.ordered_tissue7: dct[species + \"_\" + sex + \"_\" + tissue] =",
"location is whole range (e.g., 3L_17475357_17475426_+) output is a dict of junctions with",
"bs2 = int(bs2) juncstart = chromStart + bs1 + 1 juncend = chromEnd",
"partiallocation): \"\"\" species (e.g., dyak) location is partial location (e.g., 3L_17475) output is",
"sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") dct = dict() for sex in sharedinfo.ordered_sex:",
"if not sst in mdct.keys(): mdct[sst] = dict() for range in d[sst].keys(): mdct[sst][range]",
"dncov, ancov, numsamps) = line.rstrip().split(\"\\t\") # spanki junc (chrom, chromStart, chromEnd, name, score,",
"pd from sharedinfo import exist_file, get_lines def get_junction_of_species_by_partiallocation(species, partiallocation): \"\"\" species (e.g., dyak)",
"get_junction_of_species_by_location(\"dyak\", \"3L:17475357-17475426_+\") # alternative splicing junction (short) of tra in dyak dct_as1 =",
"of tra in dyak dct_cs = get_junction_of_species_by_location(\"dyak\", \"3L:17475357-17475426_+\") # alternative splicing junction (short)",
"Handling normalized read count of genes \"\"\" import focalannotation import focalgene import sharedinfo",
"+ \"_\" + sex + \"_\" + tissue + \".merged.juncs\" # spanki juncs",
"'__main__': # consensus splicing junction of tra in dyak dct_cs = get_junction_of_species_by_location(\"dyak\", \"3L:17475357-17475426_+\")",
"partiallocation, range, jun.juncinfo[range]) def get_junction_of_species_by_location(species, location): \"\"\" species (e.g., dyak) location is whole",
"coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") dct = dict()",
"get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17474844_+\") # alternative splicing junction (long) of tra in dyak dct_as2 =",
"juncstart = chromStart + bs1 + 1 juncend = chromEnd - bs2 dct[chrom",
"= 0 return dct def merge_dcts(dcts): mdct = dict() for d in dcts:",
"geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") for sex in sharedinfo.ordered_sex: for",
"= chromEnd - bs2 dct[chrom + \":\" + str(juncstart) + \"-\" + str(juncend)",
"intron_size, annostatus, gmcode, regcode, geneassign, cov, lirt, rirt, irt, dncov, ancov, numsamps) =",
"in dyak dct_cs = get_junction_of_species_by_location(\"dyak\", \"3L:17475357-17475426_+\") # alternative splicing junction (short) of tra",
"geneassign, cov, lirt, rirt, irt, dncov, ancov, numsamps) = line.rstrip().split(\"\\t\") # spanki junc",
"chromStart + bs1 + 1 juncend = chromEnd - bs2 dct[chrom + \":\"",
"\"_\" + tissue + \".sorted.junc.bed\" self.lines = get_lines(\"../data/junction\", self.filename) self.juncinfo = self.get_juncinfo() def",
"= get_lines(\"../data/junction\", self.filename) self.juncinfo = self.get_juncinfo() def get_juncinfo(self): \"\"\" get junction info \"\"\"",
"= species self.sex = sex self.tissue = tissue # self.filename = self.species +",
"juncs self.filename = self.species + \"_\" + sex + \"_\" + tissue +",
"\"_\" + tissue] = dict() jun = FocalJunction(species, sex, tissue) if location in",
"(short) of tra in dyak dct_as1 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17474844_+\") # alternative splicing junction",
"junction (long) of tra in dyak dct_as2 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17475015_+\") mdct = merge_dcts([dct_cs,",
"+ \"_\" + sex + \"_\" + tissue + \".sorted.junc.bed\" self.lines = get_lines(\"../data/junction\",",
"for range in jun.juncinfo.keys(): if range.startswith(partiallocation): print(species + \"_\" + sex + \"_\"",
"+ \"_\" + strand] = score return dct if __name__ == '__main__': #",
"= sex self.tissue = tissue # self.filename = self.species + \"_\" + sex",
"\".sorted.junc.bed\" self.lines = get_lines(\"../data/junction\", self.filename) self.juncinfo = self.get_juncinfo() def get_juncinfo(self): \"\"\" get junction",
"itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\") chromStart = int(chromStart) chromEnd = int(chromEnd) bs1,",
"in jun.juncinfo.keys(): if range.startswith(partiallocation): print(species + \"_\" + sex + \"_\" + tissue,",
"+ sex + \"_\" + tissue + \".sorted.junc.bed\" self.lines = get_lines(\"../data/junction\", self.filename) self.juncinfo",
"mdct[sst] = dict() for range in d[sst].keys(): mdct[sst][range] = d[sst][range] return mdct class",
"FocalJunction: \"\"\"FocalJunction object\"\"\" def __init__(self, species, sex, tissue): self.species = species self.sex =",
"\".merged.juncs\" # spanki juncs self.filename = self.species + \"_\" + sex + \"_\"",
"+ \"_\" + tissue + \".merged.juncs\" # spanki juncs self.filename = self.species +",
"= dict() for range in d[sst].keys(): mdct[sst][range] = d[sst][range] return mdct class FocalJunction:",
"+ sex + \"_\" + tissue + \".merged.juncs\" # spanki juncs self.filename =",
"spanki juncs self.filename = self.species + \"_\" + sex + \"_\" + tissue",
"chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\")",
"lirt, rirt, irt, dncov, ancov, numsamps) = line.rstrip().split(\"\\t\") # spanki junc (chrom, chromStart,",
"+ 1 juncend = chromEnd - bs2 dct[chrom + \":\" + str(juncstart) +",
"bs1 = int(bs1) bs2 = int(bs2) juncstart = chromStart + bs1 + 1",
"spanki junc (chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes,",
"location): \"\"\" species (e.g., dyak) location is whole range (e.g., 3L_17475357_17475426_+) output is",
"thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\") chromStart = int(chromStart) chromEnd = int(chromEnd)",
"= FocalJunction(species, sex, tissue) for range in jun.juncinfo.keys(): if range.startswith(partiallocation): print(species + \"_\"",
"in dyak dct_as2 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17475015_+\") mdct = merge_dcts([dct_cs, dct_as1, dct_as2]) mpd =",
"sex, tissue) if location in jun.juncinfo: dct[species + \"_\" + sex + \"_\"",
"for line in self.lines: # (juncid, dinucleotide, intron_size, annostatus, gmcode, regcode, geneassign, cov,",
"\"_\" + sex + \"_\" + tissue] = dict() jun = FocalJunction(species, sex,",
"+ str(juncend) + \"_\" + strand] = score return dct if __name__ ==",
"junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") dct",
"+ strand] = score return dct if __name__ == '__main__': # consensus splicing",
"strand] = score return dct if __name__ == '__main__': # consensus splicing junction",
"self.lines: # (juncid, dinucleotide, intron_size, annostatus, gmcode, regcode, geneassign, cov, lirt, rirt, irt,",
"jun.juncinfo[location] else: dct[species + \"_\" + sex + \"_\" + tissue][location] = 0",
"exist_file, get_lines def get_junction_of_species_by_partiallocation(species, partiallocation): \"\"\" species (e.g., dyak) location is partial location",
"tissue) if location in jun.juncinfo: dct[species + \"_\" + sex + \"_\" +",
"chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\") chromStart",
"for sst in d.keys(): # species_sex_tissue if not sst in mdct.keys(): mdct[sst] =",
"a dict of junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species,",
"sex, tissue) for range in jun.juncinfo.keys(): if range.startswith(partiallocation): print(species + \"_\" + sex",
"focalannotation import focalgene import sharedinfo import pandas as pd from sharedinfo import exist_file,",
"+ tissue + \".merged.juncs\" # spanki juncs self.filename = self.species + \"_\" +",
"\"\"\" dct = dict() for line in self.lines: # (juncid, dinucleotide, intron_size, annostatus,",
"\"\"\" import focalannotation import focalgene import sharedinfo import pandas as pd from sharedinfo",
"tissue + \".merged.juncs\" # spanki juncs self.filename = self.species + \"_\" + sex",
"self.lines = get_lines(\"../data/junction\", self.filename) self.juncinfo = self.get_juncinfo() def get_juncinfo(self): \"\"\" get junction info",
"in sharedinfo.ordered_tissue7: dct[species + \"_\" + sex + \"_\" + tissue] = dict()",
"+ \"-\" + str(juncend) + \"_\" + strand] = score return dct if",
"focalgene import sharedinfo import pandas as pd from sharedinfo import exist_file, get_lines def",
"line.rstrip().split(\"\\t\") chromStart = int(chromStart) chromEnd = int(chromEnd) bs1, bs2 = blockSizes.split(\",\") bs1 =",
"of tra in dyak dct_as2 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17475015_+\") mdct = merge_dcts([dct_cs, dct_as1, dct_as2])",
"junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") for",
"species_sex_tissue if not sst in mdct.keys(): mdct[sst] = dict() for range in d[sst].keys():",
"d[sst][range] return mdct class FocalJunction: \"\"\"FocalJunction object\"\"\" def __init__(self, species, sex, tissue): self.species",
"\"_\" + tissue, partiallocation, range, jun.juncinfo[range]) def get_junction_of_species_by_location(species, location): \"\"\" species (e.g., dyak)",
"tissue] = dict() jun = FocalJunction(species, sex, tissue) if location in jun.juncinfo: dct[species",
"line.rstrip().split(\"\\t\") # spanki junc (chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb,",
"self.get_juncinfo() def get_juncinfo(self): \"\"\" get junction info \"\"\" dct = dict() for line",
"blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\") chromStart = int(chromStart) chromEnd = int(chromEnd) bs1, bs2",
"dict() jun = FocalJunction(species, sex, tissue) if location in jun.juncinfo: dct[species + \"_\"",
"if location in jun.juncinfo: dct[species + \"_\" + sex + \"_\" + tissue][location]",
"get_lines def get_junction_of_species_by_partiallocation(species, partiallocation): \"\"\" species (e.g., dyak) location is partial location (e.g.,",
"chromEnd - bs2 dct[chrom + \":\" + str(juncstart) + \"-\" + str(juncend) +",
"+ \".merged.juncs\" # spanki juncs self.filename = self.species + \"_\" + sex +",
"blockSizes, blockStarts) = line.rstrip().split(\"\\t\") chromStart = int(chromStart) chromEnd = int(chromEnd) bs1, bs2 =",
"in dcts: for sst in d.keys(): # species_sex_tissue if not sst in mdct.keys():",
"+ \"_\" + tissue][location] = jun.juncinfo[location] else: dct[species + \"_\" + sex +",
"\":\" + str(juncstart) + \"-\" + str(juncend) + \"_\" + strand] = score",
"in sharedinfo.ordered_tissue7: jun = FocalJunction(species, sex, tissue) for range in jun.juncinfo.keys(): if range.startswith(partiallocation):",
"\"M\") for sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: jun = FocalJunction(species, sex,",
"for range in d[sst].keys(): mdct[sst][range] = d[sst][range] return mdct class FocalJunction: \"\"\"FocalJunction object\"\"\"",
"= dict() for sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: dct[species + \"_\"",
"= int(bs1) bs2 = int(bs2) juncstart = chromStart + bs1 + 1 juncend",
"location (e.g., 3L_17475) output is all possible junctions with coverage \"\"\" geneid =",
"jun = FocalJunction(species, sex, tissue) if location in jun.juncinfo: dct[species + \"_\" +",
"return mdct class FocalJunction: \"\"\"FocalJunction object\"\"\" def __init__(self, species, sex, tissue): self.species =",
"object\"\"\" def __init__(self, species, sex, tissue): self.species = species self.sex = sex self.tissue",
"\"_\" + sex + \"_\" + tissue + \".merged.juncs\" # spanki juncs self.filename",
"blockStarts) = line.rstrip().split(\"\\t\") chromStart = int(chromStart) chromEnd = int(chromEnd) bs1, bs2 = blockSizes.split(\",\")",
"# alternative splicing junction (long) of tra in dyak dct_as2 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17475015_+\")",
"int(bs1) bs2 = int(bs2) juncstart = chromStart + bs1 + 1 juncend =",
"chromStart = int(chromStart) chromEnd = int(chromEnd) bs1, bs2 = blockSizes.split(\",\") bs1 = int(bs1)",
"(long) of tra in dyak dct_as2 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17475015_+\") mdct = merge_dcts([dct_cs, dct_as1,",
"= dict() for d in dcts: for sst in d.keys(): # species_sex_tissue if",
"with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") dct =",
"from sharedinfo import exist_file, get_lines def get_junction_of_species_by_partiallocation(species, partiallocation): \"\"\" species (e.g., dyak) location",
"info \"\"\" dct = dict() for line in self.lines: # (juncid, dinucleotide, intron_size,",
"splicing junction (short) of tra in dyak dct_as1 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17474844_+\") # alternative",
"range in d[sst].keys(): mdct[sst][range] = d[sst][range] return mdct class FocalJunction: \"\"\"FocalJunction object\"\"\" def",
"score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\") chromStart = int(chromStart)",
"self.species + \"_\" + sex + \"_\" + tissue + \".sorted.junc.bed\" self.lines =",
"alternative splicing junction (short) of tra in dyak dct_as1 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17474844_+\") #",
"\"\"\" Purpose: Handling normalized read count of genes \"\"\" import focalannotation import focalgene",
"= get_junction_of_species_by_location(\"dyak\", \"3L:17475357-17475426_+\") # alternative splicing junction (short) of tra in dyak dct_as1",
"cov, lirt, rirt, irt, dncov, ancov, numsamps) = line.rstrip().split(\"\\t\") # spanki junc (chrom,",
"+ sex + \"_\" + tissue][location] = 0 return dct def merge_dcts(dcts): mdct",
"junction info \"\"\" dct = dict() for line in self.lines: # (juncid, dinucleotide,",
"import focalgene import sharedinfo import pandas as pd from sharedinfo import exist_file, get_lines",
"in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: dct[species + \"_\" + sex + \"_\"",
"self.filename = self.species + \"_\" + sex + \"_\" + tissue + \".sorted.junc.bed\"",
"species, sex, tissue): self.species = species self.sex = sex self.tissue = tissue #",
"read count of genes \"\"\" import focalannotation import focalgene import sharedinfo import pandas",
"dct = dict() for sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: dct[species +",
"sharedinfo import pandas as pd from sharedinfo import exist_file, get_lines def get_junction_of_species_by_partiallocation(species, partiallocation):",
"output is all possible junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen =",
"tissue # self.filename = self.species + \"_\" + sex + \"_\" + tissue",
"sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: jun = FocalJunction(species, sex, tissue) for",
"class FocalJunction: \"\"\"FocalJunction object\"\"\" def __init__(self, species, sex, tissue): self.species = species self.sex",
"# spanki juncs self.filename = self.species + \"_\" + sex + \"_\" +",
"dinucleotide, intron_size, annostatus, gmcode, regcode, geneassign, cov, lirt, rirt, irt, dncov, ancov, numsamps)",
"of genes \"\"\" import focalannotation import focalgene import sharedinfo import pandas as pd",
"sex + \"_\" + tissue, partiallocation, range, jun.juncinfo[range]) def get_junction_of_species_by_location(species, location): \"\"\" species",
"range.startswith(partiallocation): print(species + \"_\" + sex + \"_\" + tissue, partiallocation, range, jun.juncinfo[range])",
"sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: dct[species + \"_\" + sex +",
"= dict() jun = FocalJunction(species, sex, tissue) if location in jun.juncinfo: dct[species +",
"= score return dct if __name__ == '__main__': # consensus splicing junction of",
"junction of tra in dyak dct_cs = get_junction_of_species_by_location(\"dyak\", \"3L:17475357-17475426_+\") # alternative splicing junction",
"__name__ == '__main__': # consensus splicing junction of tra in dyak dct_cs =",
"in d.keys(): # species_sex_tissue if not sst in mdct.keys(): mdct[sst] = dict() for",
"dict() for line in self.lines: # (juncid, dinucleotide, intron_size, annostatus, gmcode, regcode, geneassign,",
"dict() for range in d[sst].keys(): mdct[sst][range] = d[sst][range] return mdct class FocalJunction: \"\"\"FocalJunction",
"(juncid, dinucleotide, intron_size, annostatus, gmcode, regcode, geneassign, cov, lirt, rirt, irt, dncov, ancov,",
"= jun.juncinfo[location] else: dct[species + \"_\" + sex + \"_\" + tissue][location] =",
"dct[chrom + \":\" + str(juncstart) + \"-\" + str(juncend) + \"_\" + strand]",
"self.sex = sex self.tissue = tissue # self.filename = self.species + \"_\" +",
"is a dict of junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen =",
"annostatus, gmcode, regcode, geneassign, cov, lirt, rirt, irt, dncov, ancov, numsamps) = line.rstrip().split(\"\\t\")",
"else: dct[species + \"_\" + sex + \"_\" + tissue][location] = 0 return",
"tissue][location] = jun.juncinfo[location] else: dct[species + \"_\" + sex + \"_\" + tissue][location]",
"+ \"_\" + tissue + \".sorted.junc.bed\" self.lines = get_lines(\"../data/junction\", self.filename) self.juncinfo = self.get_juncinfo()",
"range, jun.juncinfo[range]) def get_junction_of_species_by_location(species, location): \"\"\" species (e.g., dyak) location is whole range",
"tissue): self.species = species self.sex = sex self.tissue = tissue # self.filename =",
"if __name__ == '__main__': # consensus splicing junction of tra in dyak dct_cs",
"(e.g., 3L_17475357_17475426_+) output is a dict of junctions with coverage \"\"\" geneid =",
"normalized read count of genes \"\"\" import focalannotation import focalgene import sharedinfo import",
"tissue, partiallocation, range, jun.juncinfo[range]) def get_junction_of_species_by_location(species, location): \"\"\" species (e.g., dyak) location is",
"possible junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\")",
"genes \"\"\" import focalannotation import focalgene import sharedinfo import pandas as pd from",
"== '__main__': # consensus splicing junction of tra in dyak dct_cs = get_junction_of_species_by_location(\"dyak\",",
"# consensus splicing junction of tra in dyak dct_cs = get_junction_of_species_by_location(\"dyak\", \"3L:17475357-17475426_+\") #",
"dyak) location is partial location (e.g., 3L_17475) output is all possible junctions with",
"merge_dcts(dcts): mdct = dict() for d in dcts: for sst in d.keys(): #",
"with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") for sex",
"print(species + \"_\" + sex + \"_\" + tissue, partiallocation, range, jun.juncinfo[range]) def",
"= blockSizes.split(\",\") bs1 = int(bs1) bs2 = int(bs2) juncstart = chromStart + bs1",
"\"_\" + strand] = score return dct if __name__ == '__main__': # consensus",
"1 juncend = chromEnd - bs2 dct[chrom + \":\" + str(juncstart) + \"-\"",
"sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: jun = FocalJunction(species, sex, tissue) for range in",
"in d[sst].keys(): mdct[sst][range] = d[sst][range] return mdct class FocalJunction: \"\"\"FocalJunction object\"\"\" def __init__(self,",
"self.filename = self.species + \"_\" + sex + \"_\" + tissue + \".merged.juncs\"",
"mdct = dict() for d in dcts: for sst in d.keys(): # species_sex_tissue",
"for tissue in sharedinfo.ordered_tissue7: dct[species + \"_\" + sex + \"_\" + tissue]",
"= int(chromEnd) bs1, bs2 = blockSizes.split(\",\") bs1 = int(bs1) bs2 = int(bs2) juncstart",
"rirt, irt, dncov, ancov, numsamps) = line.rstrip().split(\"\\t\") # spanki junc (chrom, chromStart, chromEnd,",
"sst in mdct.keys(): mdct[sst] = dict() for range in d[sst].keys(): mdct[sst][range] = d[sst][range]",
"= dict() for line in self.lines: # (juncid, dinucleotide, intron_size, annostatus, gmcode, regcode,",
"\"3L:17474772-17474844_+\") # alternative splicing junction (long) of tra in dyak dct_as2 = get_junction_of_species_by_location(\"dyak\",",
"bs1, bs2 = blockSizes.split(\",\") bs1 = int(bs1) bs2 = int(bs2) juncstart = chromStart",
"location is partial location (e.g., 3L_17475) output is all possible junctions with coverage",
"+ \"_\" + sex + \"_\" + tissue] = dict() jun = FocalJunction(species,",
"FocalJunction(species, sex, tissue) for range in jun.juncinfo.keys(): if range.startswith(partiallocation): print(species + \"_\" +",
"mdct[sst][range] = d[sst][range] return mdct class FocalJunction: \"\"\"FocalJunction object\"\"\" def __init__(self, species, sex,",
"chromEnd = int(chromEnd) bs1, bs2 = blockSizes.split(\",\") bs1 = int(bs1) bs2 = int(bs2)",
"\"_\" + tissue][location] = 0 return dct def merge_dcts(dcts): mdct = dict() for",
"+ \":\" + str(juncstart) + \"-\" + str(juncend) + \"_\" + strand] =",
"focalgene.FocalGene(species, geneid, \"M\") dct = dict() for sex in sharedinfo.ordered_sex: for tissue in",
"in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: jun = FocalJunction(species, sex, tissue) for range",
"+ sex + \"_\" + tissue][location] = jun.juncinfo[location] else: dct[species + \"_\" +",
"blockSizes.split(\",\") bs1 = int(bs1) bs2 = int(bs2) juncstart = chromStart + bs1 +",
"+ tissue] = dict() jun = FocalJunction(species, sex, tissue) if location in jun.juncinfo:",
"str(juncstart) + \"-\" + str(juncend) + \"_\" + strand] = score return dct",
"get junction info \"\"\" dct = dict() for line in self.lines: # (juncid,",
"dct if __name__ == '__main__': # consensus splicing junction of tra in dyak",
"\"_\" + tissue + \".merged.juncs\" # spanki juncs self.filename = self.species + \"_\"",
"tissue) for range in jun.juncinfo.keys(): if range.startswith(partiallocation): print(species + \"_\" + sex +",
"sex + \"_\" + tissue + \".merged.juncs\" # spanki juncs self.filename = self.species",
"+ tissue + \".sorted.junc.bed\" self.lines = get_lines(\"../data/junction\", self.filename) self.juncinfo = self.get_juncinfo() def get_juncinfo(self):",
"+ tissue][location] = 0 return dct def merge_dcts(dcts): mdct = dict() for d",
"Purpose: Handling normalized read count of genes \"\"\" import focalannotation import focalgene import",
"int(bs2) juncstart = chromStart + bs1 + 1 juncend = chromEnd - bs2",
"python \"\"\" Purpose: Handling normalized read count of genes \"\"\" import focalannotation import",
"+ \"_\" + sex + \"_\" + tissue][location] = jun.juncinfo[location] else: dct[species +",
"junc (chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts)",
"is partial location (e.g., 3L_17475) output is all possible junctions with coverage \"\"\"",
"geneid, \"M\") dct = dict() for sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7:",
"splicing junction of tra in dyak dct_cs = get_junction_of_species_by_location(\"dyak\", \"3L:17475357-17475426_+\") # alternative splicing",
"gen = focalgene.FocalGene(species, geneid, \"M\") for sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7:",
"\"_\" + sex + \"_\" + tissue, partiallocation, range, jun.juncinfo[range]) def get_junction_of_species_by_location(species, location):",
"species (e.g., dyak) location is partial location (e.g., 3L_17475) output is all possible",
"regcode, geneassign, cov, lirt, rirt, irt, dncov, ancov, numsamps) = line.rstrip().split(\"\\t\") # spanki",
"for d in dcts: for sst in d.keys(): # species_sex_tissue if not sst",
"dyak) location is whole range (e.g., 3L_17475357_17475426_+) output is a dict of junctions",
"return dct if __name__ == '__main__': # consensus splicing junction of tra in",
"import pandas as pd from sharedinfo import exist_file, get_lines def get_junction_of_species_by_partiallocation(species, partiallocation): \"\"\"",
"\"3L:17475357-17475426_+\") # alternative splicing junction (short) of tra in dyak dct_as1 = get_junction_of_species_by_location(\"dyak\",",
"splicing junction (long) of tra in dyak dct_as2 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17475015_+\") mdct =",
"of tra in dyak dct_as1 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17474844_+\") # alternative splicing junction (long)",
"= sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") for sex in sharedinfo.ordered_sex: for tissue",
"for tissue in sharedinfo.ordered_tissue7: jun = FocalJunction(species, sex, tissue) for range in jun.juncinfo.keys():",
"sex self.tissue = tissue # self.filename = self.species + \"_\" + sex +",
"- bs2 dct[chrom + \":\" + str(juncstart) + \"-\" + str(juncend) + \"_\"",
"# self.filename = self.species + \"_\" + sex + \"_\" + tissue +",
"in mdct.keys(): mdct[sst] = dict() for range in d[sst].keys(): mdct[sst][range] = d[sst][range] return",
"numsamps) = line.rstrip().split(\"\\t\") # spanki junc (chrom, chromStart, chromEnd, name, score, strand, thickStart,",
"name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\") chromStart =",
"dct = dict() for line in self.lines: # (juncid, dinucleotide, intron_size, annostatus, gmcode,",
"bs1 + 1 juncend = chromEnd - bs2 dct[chrom + \":\" + str(juncstart)",
"sex + \"_\" + tissue + \".sorted.junc.bed\" self.lines = get_lines(\"../data/junction\", self.filename) self.juncinfo =",
"# spanki junc (chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount,",
"self.filename) self.juncinfo = self.get_juncinfo() def get_juncinfo(self): \"\"\" get junction info \"\"\" dct =",
"get_junction_of_species_by_location(species, location): \"\"\" species (e.g., dyak) location is whole range (e.g., 3L_17475357_17475426_+) output",
"\"_\" + sex + \"_\" + tissue][location] = jun.juncinfo[location] else: dct[species + \"_\"",
"= line.rstrip().split(\"\\t\") chromStart = int(chromStart) chromEnd = int(chromEnd) bs1, bs2 = blockSizes.split(\",\") bs1",
"dct[species + \"_\" + sex + \"_\" + tissue][location] = 0 return dct",
"sex + \"_\" + tissue][location] = jun.juncinfo[location] else: dct[species + \"_\" + sex",
"sharedinfo import exist_file, get_lines def get_junction_of_species_by_partiallocation(species, partiallocation): \"\"\" species (e.g., dyak) location is",
"str(juncend) + \"_\" + strand] = score return dct if __name__ == '__main__':",
"\"M\") dct = dict() for sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: dct[species",
"of junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\")",
"tissue + \".sorted.junc.bed\" self.lines = get_lines(\"../data/junction\", self.filename) self.juncinfo = self.get_juncinfo() def get_juncinfo(self): \"\"\"",
"tra in dyak dct_as2 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17475015_+\") mdct = merge_dcts([dct_cs, dct_as1, dct_as2]) mpd",
"import focalannotation import focalgene import sharedinfo import pandas as pd from sharedinfo import",
"+ \"_\" + sex + \"_\" + tissue, partiallocation, range, jun.juncinfo[range]) def get_junction_of_species_by_location(species,",
"def merge_dcts(dcts): mdct = dict() for d in dcts: for sst in d.keys():",
"location in jun.juncinfo: dct[species + \"_\" + sex + \"_\" + tissue][location] =",
"dct_as2 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17475015_+\") mdct = merge_dcts([dct_cs, dct_as1, dct_as2]) mpd = pd.DataFrame.from_dict(mdct) mpd.to_csv(\"../data/output/dyak.tra.junc.summary.txt\",",
"dyak dct_as1 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17474844_+\") # alternative splicing junction (long) of tra in",
"= tissue # self.filename = self.species + \"_\" + sex + \"_\" +",
"get_juncinfo(self): \"\"\" get junction info \"\"\" dct = dict() for line in self.lines:",
"+ \"_\" + tissue][location] = 0 return dct def merge_dcts(dcts): mdct = dict()",
"+ sex + \"_\" + tissue, partiallocation, range, jun.juncinfo[range]) def get_junction_of_species_by_location(species, location): \"\"\"",
"= self.get_juncinfo() def get_juncinfo(self): \"\"\" get junction info \"\"\" dct = dict() for",
"\"\"\" species (e.g., dyak) location is whole range (e.g., 3L_17475357_17475426_+) output is a",
"consensus splicing junction of tra in dyak dct_cs = get_junction_of_species_by_location(\"dyak\", \"3L:17475357-17475426_+\") # alternative",
"(e.g., dyak) location is partial location (e.g., 3L_17475) output is all possible junctions",
"+ \".sorted.junc.bed\" self.lines = get_lines(\"../data/junction\", self.filename) self.juncinfo = self.get_juncinfo() def get_juncinfo(self): \"\"\" get",
"3L_17475357_17475426_+) output is a dict of junctions with coverage \"\"\" geneid = sharedinfo.tra_species2geneid[species]",
"alternative splicing junction (long) of tra in dyak dct_as2 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17475015_+\") mdct",
"d in dcts: for sst in d.keys(): # species_sex_tissue if not sst in",
"d.keys(): # species_sex_tissue if not sst in mdct.keys(): mdct[sst] = dict() for range",
"import exist_file, get_lines def get_junction_of_species_by_partiallocation(species, partiallocation): \"\"\" species (e.g., dyak) location is partial",
"tra in dyak dct_cs = get_junction_of_species_by_location(\"dyak\", \"3L:17475357-17475426_+\") # alternative splicing junction (short) of",
"count of genes \"\"\" import focalannotation import focalgene import sharedinfo import pandas as",
"+ tissue][location] = jun.juncinfo[location] else: dct[species + \"_\" + sex + \"_\" +",
"strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\") chromStart = int(chromStart) chromEnd",
"\"_\" + sex + \"_\" + tissue][location] = 0 return dct def merge_dcts(dcts):",
"# species_sex_tissue if not sst in mdct.keys(): mdct[sst] = dict() for range in",
"\"-\" + str(juncend) + \"_\" + strand] = score return dct if __name__",
"is whole range (e.g., 3L_17475357_17475426_+) output is a dict of junctions with coverage",
"= d[sst][range] return mdct class FocalJunction: \"\"\"FocalJunction object\"\"\" def __init__(self, species, sex, tissue):",
"tissue in sharedinfo.ordered_tissue7: jun = FocalJunction(species, sex, tissue) for range in jun.juncinfo.keys(): if",
"for sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: jun = FocalJunction(species, sex, tissue)",
"\"_\" + sex + \"_\" + tissue + \".sorted.junc.bed\" self.lines = get_lines(\"../data/junction\", self.filename)",
"= sharedinfo.tra_species2geneid[species] gen = focalgene.FocalGene(species, geneid, \"M\") dct = dict() for sex in",
"d[sst].keys(): mdct[sst][range] = d[sst][range] return mdct class FocalJunction: \"\"\"FocalJunction object\"\"\" def __init__(self, species,",
"sharedinfo.ordered_tissue7: jun = FocalJunction(species, sex, tissue) for range in jun.juncinfo.keys(): if range.startswith(partiallocation): print(species",
"def __init__(self, species, sex, tissue): self.species = species self.sex = sex self.tissue =",
"dcts: for sst in d.keys(): # species_sex_tissue if not sst in mdct.keys(): mdct[sst]",
"junction (short) of tra in dyak dct_as1 = get_junction_of_species_by_location(\"dyak\", \"3L:17474772-17474844_+\") # alternative splicing",
"get_lines(\"../data/junction\", self.filename) self.juncinfo = self.get_juncinfo() def get_juncinfo(self): \"\"\" get junction info \"\"\" dct",
"0 return dct def merge_dcts(dcts): mdct = dict() for d in dcts: for",
"= focalgene.FocalGene(species, geneid, \"M\") for sex in sharedinfo.ordered_sex: for tissue in sharedinfo.ordered_tissue7: jun",
"jun = FocalJunction(species, sex, tissue) for range in jun.juncinfo.keys(): if range.startswith(partiallocation): print(species +",
"juncend = chromEnd - bs2 dct[chrom + \":\" + str(juncstart) + \"-\" +"
] |
[
"end): if start in trees: return root = Node(start) children = [get_tree(i) for",
"tree found by breadth first search.\"\"\" seen: Set[int] = set() root_node = Node(root)",
"int: \"\"\"Return the number nodes in the proof tree.\"\"\" return 1 + sum(len(c)",
"= queue.popleft() rule = choice(list(rules_dict[v.label])) if not (v.label in seen or rule ==",
"RulesDict, root: int) -> Node: \"\"\"Finds an iterative proof tree for root, if",
"not None and maximum <= 0: return if not root_labels: yield seen, []",
"all of the trees using the given roots..\"\"\" if seen is None: seen",
"iterative_proof_tree_finder(rules_dict: RulesDict, root: int) -> Node: \"\"\"Finds an iterative proof tree for root,",
"maximum: Optional[int] = None ) -> Iterator[Tuple[FrozenSet[int], List[Node]]]: if maximum is not None",
"in a combinatorial specification. This changes rdict in place. \"\"\" changed = True",
"1 + sum(len(c) for c in self.children) def prune(rdict: RulesDict) -> None: \"\"\"",
"prune(rdict: RulesDict) -> None: \"\"\" Prune all nodes not in a combinatorial specification.",
"children is None: children = [] self.label = n self.children = children def",
"= choice(list(rules_dict[v.label])) if not (v.label in seen or rule == ()): children =",
"be pruned. \"\"\" def _dfs_tree( root_label: int, seen: FrozenSet[int], maximum: int = None",
"RulesDict, root: int, maximum: Optional[int] = None ) -> Iterator[Node]: \"\"\"A generator for",
"child.label == root]) v.children = children return root_node def random_proof_tree(rules_dict: RulesDict, root: int)",
"= children seen.add(v.label) return root_node def smallish_random_proof_tree( rules_dict: RulesDict, root: int, minimization_time_limit: float",
"= next_tree smallest_size = next_tree_size return smallest_so_far def proof_tree_generator_bfs(rules_dict: RulesDict, root: int) ->",
"def _dfs_tree( root_label: int, seen: FrozenSet[int], maximum: int = None ) -> Iterator[Tuple[FrozenSet[int],",
"= next_tree_size return smallest_so_far def proof_tree_generator_bfs(rules_dict: RulesDict, root: int) -> Iterator[Node]: \"\"\"A generator",
"def random_proof_tree(rules_dict: RulesDict, root: int) -> Node: \"\"\"Return random tree found by breadth",
"in trees) if maximum is not None and actual_length < maximum: yield seen1.union(seen2),",
"set([self.label]) res.update(chain.from_iterable(node.labels() for node in self.children)) return res def nodes(self) -> Iterator[\"Node\"]: \"\"\"Yield",
"\"\"\" import time from collections import defaultdict, deque from copy import deepcopy from",
"None ) -> Tuple[Set[int], List[Node]]: \"\"\"Return all labels which have been seen, together",
"int, children: Optional[List[\"Node\"]] = None): if children is None: children = [] self.label",
"None for seen2, trees in _dfs_forest(roots, seen1, new_maximum): actual_length = length + sum(len(t)",
"self.children: yield from node.nodes() def __str__(self) -> str: return \"\".join([\"(\", str(self.label), *map(str, self.children),",
"in rules_dict[root_label]: for children in product( *[_bfs_helper(child_label, next_seen) for child_label in rule] ):",
"smallest_so_far = random_proof_tree(rules_dict, root=root) smallest_size = len(smallest_so_far) while time.time() - start_time < minimization_time_limit:",
"= False for k, rule_set in list(rdict.items()): for rule in list(rule_set): if all(x",
"queue.extend(children) v.children = children seen.add(v.label) return root_node def smallish_random_proof_tree( rules_dict: RulesDict, root: int,",
"import deepcopy from itertools import chain, product from random import choice, shuffle from",
"self.children = children def labels(self) -> Set[int]: \"\"\"Return the set of all labels",
"len(smallest_so_far) while time.time() - start_time < minimization_time_limit: next_tree = random_proof_tree(rules_dict, root=root) next_tree_size =",
"yield root_node sorted_rules_dict = { start: tuple(sorted(ends)) for start, ends in rules_dict.items() }",
"root_label: int, seen: FrozenSet[int], maximum: int = None ) -> Iterator[Tuple[FrozenSet[int], Node]]: if",
"tree in _dfs_tree(root, seen, new_max): length = len(tree) new_maximum = maximum - length",
"in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) create_tree(k, rule) rdict[k].remove(rule) if not rule_set:",
"rule = sorted(rules_dict[v.label])[0] if not rule == (): children = [Node(i) for i",
"\"\"\" def _bfs_helper(root_label: int, seen: FrozenSet[int]): if root_label in seen: yield Node(root_label) return",
"rule == (): yield seen, Node(root_label) else: for new_seen, children in _dfs_forest(rule, seen,",
"= length + sum(len(t) for t in trees) if maximum is not None",
"if maximum is not None and maximum <= 0: return if not root_labels:",
"def labels(self) -> Set[int]: \"\"\"Return the set of all labels in the proof",
"\"\"\"Return random tree found by breadth first search.\"\"\" seen: Set[int] = set() root_node",
"= True verified_labels.add(k) new_rules_dict[k].add(rule) create_tree(k, rule) rdict[k].remove(rule) if not rule_set: del rdict[k] if",
"-> int: \"\"\"Return the number nodes in the proof tree.\"\"\" return 1 +",
"[tree] + trees sorted_rules_dict = { start: tuple(sorted(ends)) for start, ends in rules_dict.items()",
"= defaultdict(set) while True: changed = False for k, rule_set in list(rdict.items()): for",
"RulesDict, roots: Sequence[int], seen: Optional[Set[int]] = None ) -> Tuple[Set[int], List[Node]]: \"\"\"Return all",
"in the proof tree.\"\"\" return 1 + sum(len(c) for c in self.children) def",
"Optional[Set[int]] = None ) -> Tuple[Set[int], List[Node]]: \"\"\"Return all labels which have been",
"the given roots..\"\"\" if seen is None: seen = set() if not roots:",
"i in rule] shuffle(children) queue.extend(children) v.children = children seen.add(v.label) return root_node def smallish_random_proof_tree(",
"seen1, tree in _dfs_tree(root, seen, new_max): length = len(tree) new_maximum = maximum -",
"not rule == (): children = [Node(i) for i in rule] queue.extend([child for",
"seen, [] else: root, roots = root_labels[0], root_labels[1:] new_max = maximum - len(root_labels)",
"def nodes(self) -> Iterator[\"Node\"]: \"\"\"Yield all nodes in the proof tree.\"\"\" yield self",
"children yield root_node sorted_rules_dict = { start: tuple(sorted(ends)) for start, ends in rules_dict.items()",
"None: seen = set() seen = seen.copy() if root in rules_dict: rule_set =",
"if rule == (): yield seen, Node(root_label) else: for new_seen, children in _dfs_forest(rule,",
"children = [Node(i) for i in rule] queue.extend([child for child in children if",
"not (v.label in seen or rule == ()): children = [Node(i) for i",
"Set[int]: \"\"\"Return the set of all labels in the proof tree.\"\"\" res =",
"root: Optional[int] = None) -> RulesDict: \"\"\"Prune all nodes not iteratively verifiable.\"\"\" verified_labels:",
"rdict for x in rule): rule_set.remove(rule) changed = True if not rule_set: del",
"<= 0: return if not root_labels: yield seen, [] else: root, roots =",
"if not rule_set: del rdict[k] if not changed: break return new_rules_dict def proof_tree_dfs(rules_dict:",
"pruned. \"\"\" def _bfs_helper(root_label: int, seen: FrozenSet[int]): if root_label in seen: yield Node(root_label)",
"*[_bfs_helper(child_label, next_seen) for child_label in rule] ): root_node = Node(root_label) root_node.children = children",
"is not None else None for seen2, trees in _dfs_forest(roots, seen1, new_maximum): actual_length",
"= None ) -> Iterator[Tuple[FrozenSet[int], Node]]: if maximum is not None and maximum",
"rule = choice(list(rules_dict[v.label])) if not (v.label in seen or rule == ()): children",
"yield Node(root_label) return next_seen = seen.union((root_label,)) for rule in rules_dict[root_label]: for children in",
"children = [Node(i) for i in rule] shuffle(children) queue.extend(children) v.children = children seen.add(v.label)",
"def get_tree(start): if start == root: return Node(start) if start in trees: return",
"rule_set: seen.add(root) return seen, root_node seen.add(root) rule = choice(list(rule_set)) visited, trees = all_proof_trees_dfs(rules_dict,",
"new_seen, root_node def _dfs_forest( root_labels: Sequence[int], seen: FrozenSet[int], maximum: Optional[int] = None )",
"\"\"\"Prune all nodes not iteratively verifiable.\"\"\" verified_labels: Set[int] = set() if root is",
"= maximum - length if maximum is not None else None for seen2,",
"[] else: root, roots = root_labels[0], root_labels[1:] new_max = maximum - len(root_labels) +",
"rule in list(rule_set): if all(x in verified_labels for x in rule): changed =",
"maximum): root_node = Node(root_label) root_node.children = children yield new_seen, root_node def _dfs_forest( root_labels:",
"rdict[k] if not changed: break return new_rules_dict def proof_tree_dfs(rules_dict: RulesDict, root: int, seen:",
"= len(tree) new_maximum = maximum - length if maximum is not None else",
"new_rules_dict def proof_tree_dfs(rules_dict: RulesDict, root: int, seen: Optional[Set[int]] = None): \"\"\"Return random proof",
"proof tree. \"\"\" import time from collections import defaultdict, deque from copy import",
"def __init__(self, n: int, children: Optional[List[\"Node\"]] = None): if children is None: children",
"nodes not in a combinatorial specification. This changes rdict in place. \"\"\" changed",
"smallish_random_proof_tree( rules_dict: RulesDict, root: int, minimization_time_limit: float ) -> Node: \"\"\" Searches a",
"root_node = Node(root) if root in seen or () in rule_set: seen.add(root) return",
"= None ) -> Iterator[Node]: \"\"\"A generator for all proof trees using depth",
"int = None ) -> Iterator[Tuple[FrozenSet[int], Node]]: if maximum is not None and",
"yield seen, Node(root_label) return seen = seen.union((root_label,)) for rule in sorted_rules_dict[root_label]: if rule",
"time from collections import defaultdict, deque from copy import deepcopy from itertools import",
"random_proof_tree(rules_dict, root=root) next_tree_size = len(next_tree) if next_tree_size < smallest_size: smallest_so_far = next_tree smallest_size",
"deque([root_node]) while queue: v = queue.popleft() rule = choice(list(rules_dict[v.label])) if not (v.label in",
"not in rdict for x in rule): rule_set.remove(rule) changed = True if not",
"in seen or rule == ()): children = [Node(i) for i in rule]",
"verified_labels: Set[int] = set() if root is not None: verified_labels.add(root) rdict = deepcopy(rules_dict)",
"changed = True if not rule_set: del rdict[k] def iterative_prune(rules_dict: RulesDict, root: Optional[int]",
"str(self.label), *map(str, self.children), \")\"]) def __len__(self) -> int: \"\"\"Return the number nodes in",
"root: int, minimization_time_limit: float ) -> Node: \"\"\" Searches a rule_dict known to",
"RulesDict, root: int, seen: Optional[Set[int]] = None): \"\"\"Return random proof tree found by",
"proof_tree_generator_dfs( rules_dict: RulesDict, root: int, maximum: Optional[int] = None ) -> Iterator[Node]: \"\"\"A",
"nodes not iteratively verifiable.\"\"\" verified_labels: Set[int] = set() if root is not None:",
"changed = True verified_labels.add(k) new_rules_dict[k].add(rule) rdict[k].remove(rule) if not rule_set: del rdict[k] if not",
"set of all labels in the proof tree.\"\"\" res = set([self.label]) res.update(chain.from_iterable(node.labels() for",
"for x in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) create_tree(k, rule) rdict[k].remove(rule) if",
"+ trees sorted_rules_dict = { start: tuple(sorted(ends)) for start, ends in rules_dict.items() }",
"random proof tree found by depth first search.\"\"\" if seen is None: seen",
"raise KeyError(\"{} is not in trees\".format(start)) def create_tree(start, end): if start in trees:",
"shuffle(children) queue.extend(children) v.children = children seen.add(v.label) return root_node def smallish_random_proof_tree( rules_dict: RulesDict, root:",
"next_tree = random_proof_tree(rules_dict, root=root) next_tree_size = len(next_tree) if next_tree_size < smallest_size: smallest_so_far =",
"Node(root_label) else: for new_seen, children in _dfs_forest(rule, seen, maximum): root_node = Node(root_label) root_node.children",
"= True verified_labels.add(k) new_rules_dict[k].add(rule) rdict[k].remove(rule) if not rule_set: del rdict[k] if not changed:",
"to be pruned. \"\"\" def _dfs_tree( root_label: int, seen: FrozenSet[int], maximum: int =",
"proof tree.\"\"\" res = set([self.label]) res.update(chain.from_iterable(node.labels() for node in self.children)) return res def",
"rule_set: del rdict[k] if not changed: break return new_rules_dict def proof_tree_dfs(rules_dict: RulesDict, root:",
"while time.time() - start_time < minimization_time_limit: next_tree = random_proof_tree(rules_dict, root=root) next_tree_size = len(next_tree)",
"-> Iterator[Tuple[FrozenSet[int], List[Node]]]: if maximum is not None and maximum <= 0: return",
"children if not child.label == root]) v.children = children return root_node def random_proof_tree(rules_dict:",
"from _bfs_helper(root, frozenset()) def proof_tree_generator_dfs( rules_dict: RulesDict, root: int, maximum: Optional[int] = None",
"root_node = Node(root_label) root_node.children = children yield root_node sorted_rules_dict = { start: tuple(sorted(ends))",
"start_time < minimization_time_limit: next_tree = random_proof_tree(rules_dict, root=root) next_tree_size = len(next_tree) if next_tree_size <",
"maximum is not None else None for seen2, trees in _dfs_forest(roots, seen1, new_maximum):",
"maximum - len(root_labels) + 1 if maximum is not None else None for",
"v.children = children return root_node def random_proof_tree(rules_dict: RulesDict, root: int) -> Node: \"\"\"Return",
"tree def iterative_proof_tree_finder(rules_dict: RulesDict, root: int) -> Node: \"\"\"Finds an iterative proof tree",
"the proof tree.\"\"\" return 1 + sum(len(c) for c in self.children) def prune(rdict:",
"rule_set: del rdict[k] if not changed: break if root in trees: return trees[root]",
"= None): \"\"\"Return random proof tree found by depth first search.\"\"\" if seen",
"is assumed to be pruned. \"\"\" def _bfs_helper(root_label: int, seen: FrozenSet[int]): if root_label",
"seen = set() if not roots: return seen, [] root, roots = roots[0],",
"specification. Spends minimization_time_limit seconds searching. \"\"\" start_time = time.time() smallest_so_far = random_proof_tree(rules_dict, root=root)",
"is None: seen = set() seen = seen.copy() if root in rules_dict: rule_set",
"Iterator[\"Node\"]: \"\"\"Yield all nodes in the proof tree.\"\"\" yield self for node in",
"The rules_dict is assumed to be pruned. \"\"\" def _dfs_tree( root_label: int, seen:",
"rule) rdict[k].remove(rule) if not rule_set: del rdict[k] if not changed: break if root",
"itertools import chain, product from random import choice, shuffle from typing import Dict,",
"_dfs_forest(rule, seen, maximum): root_node = Node(root_label) root_node.children = children yield new_seen, root_node def",
"all labels in the proof tree.\"\"\" res = set([self.label]) res.update(chain.from_iterable(node.labels() for node in",
"\"\"\"Return the number nodes in the proof tree.\"\"\" return 1 + sum(len(c) for",
"root: int) -> Node: \"\"\"Takes in a iterative pruned rules_dict and returns iterative",
"Sequence[int], seen: Optional[Set[int]] = None ) -> Tuple[Set[int], List[Node]]: \"\"\"Return all labels which",
"= Node(root) queue = deque([root_node]) while queue: v = queue.popleft() rule = choice(list(rules_dict[v.label]))",
"res def nodes(self) -> Iterator[\"Node\"]: \"\"\"Yield all nodes in the proof tree.\"\"\" yield",
"if root in rules_dict: rule_set = rules_dict[root] root_node = Node(root) if root in",
"self.children), \")\"]) def __len__(self) -> int: \"\"\"Return the number nodes in the proof",
"seen1, tree = proof_tree_dfs(rules_dict, root, seen) seen2, trees = all_proof_trees_dfs(rules_dict, roots, seen1) return",
"= all_proof_trees_dfs(rules_dict, roots, seen1) return seen1.union(seen2), [tree] + trees def iterative_proof_tree_bfs(rules_dict: RulesDict, root:",
"= {} def get_tree(start): if start == root: return Node(start) if start in",
"choice(list(rules_dict[v.label])) if not (v.label in seen or rule == ()): children = [Node(i)",
"not None else None for seen2, trees in _dfs_forest(roots, seen1, new_maximum): actual_length =",
"for child_label in rule] ): root_node = Node(root_label) root_node.children = children yield root_node",
"if root in trees: return trees[root] raise ValueError(\"{} has no tree in rules_dict\".format(root))",
"RulesDict, root: Optional[int] = None) -> RulesDict: \"\"\"Prune all nodes not iteratively verifiable.\"\"\"",
"of the trees using the given roots..\"\"\" if seen is None: seen =",
"seen.copy() if root in rules_dict: rule_set = rules_dict[root] root_node = Node(root) if root",
"import chain, product from random import choice, shuffle from typing import Dict, FrozenSet,",
"is None: seen = set() if not roots: return seen, [] root, roots",
"FrozenSet[int]): if root_label in seen: yield Node(root_label) return next_seen = seen.union((root_label,)) for rule",
"iterative_prune(rules_dict: RulesDict, root: Optional[int] = None) -> RulesDict: \"\"\"Prune all nodes not iteratively",
"root_node sorted_rules_dict = { start: tuple(sorted(ends)) for start, ends in rules_dict.items() } if",
"The rules_dict is assumed to be pruned. \"\"\" def _bfs_helper(root_label: int, seen: FrozenSet[int]):",
"_dfs_tree(root, seen, new_max): length = len(tree) new_maximum = maximum - length if maximum",
"labels which have been seen, together with all of the trees using the",
"found by depth first search.\"\"\" if seen is None: seen = set() seen",
"in verified_labels for x in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) rdict[k].remove(rule) if",
"trees in _dfs_forest(roots, seen1, new_maximum): actual_length = length + sum(len(t) for t in",
"self for node in self.children: yield from node.nodes() def __str__(self) -> str: return",
"rules_dict: RulesDict, root: int, minimization_time_limit: float ) -> Node: \"\"\" Searches a rule_dict",
"new_maximum = maximum - length if maximum is not None else None for",
"in the proof tree.\"\"\" yield self for node in self.children: yield from node.nodes()",
"-> Node: \"\"\"Takes in a iterative pruned rules_dict and returns iterative proof tree.\"\"\"",
"Node: \"\"\" Searches a rule_dict known to contain at least one specification for",
"smallest_so_far def proof_tree_generator_bfs(rules_dict: RulesDict, root: int) -> Iterator[Node]: \"\"\"A generator for all proof",
"search. N.B. The rules_dict is assumed to be pruned. \"\"\" def _bfs_helper(root_label: int,",
"for x in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) rdict[k].remove(rule) if not rule_set:",
"create_tree(k, rule) rdict[k].remove(rule) if not rule_set: del rdict[k] if not changed: break if",
"if start in trees: return root = Node(start) children = [get_tree(i) for i",
"root, roots = roots[0], roots[1:] seen1, tree = proof_tree_dfs(rules_dict, root, seen) seen2, trees",
"roots = root_labels[0], root_labels[1:] new_max = maximum - len(root_labels) + 1 if maximum",
"children in product( *[_bfs_helper(child_label, next_seen) for child_label in rule] ): root_node = Node(root_label)",
"node in self.children)) return res def nodes(self) -> Iterator[\"Node\"]: \"\"\"Yield all nodes in",
"verified_labels = set() if root is not None: verified_labels.add(root) rdict = deepcopy(rules_dict) new_rules_dict:",
"for start, ends in rules_dict.items() } if root in sorted_rules_dict: yield from _bfs_helper(root,",
"a iterative pruned rules_dict and returns iterative proof tree.\"\"\" root_node = Node(root) queue",
"__init__(self, n: int, children: Optional[List[\"Node\"]] = None): if children is None: children =",
"del rdict[k] if not changed: break return new_rules_dict def proof_tree_dfs(rules_dict: RulesDict, root: int,",
"root_node def smallish_random_proof_tree( rules_dict: RulesDict, root: int, minimization_time_limit: float ) -> Node: \"\"\"",
"depth first search.\"\"\" if seen is None: seen = set() seen = seen.copy()",
"proof_tree_dfs(rules_dict, root, seen) seen2, trees = all_proof_trees_dfs(rules_dict, roots, seen1) return seen1.union(seen2), [tree] +",
"Iterator[Node]: \"\"\"A generator for all proof trees using breadth first search. N.B. The",
"maximum is not None and maximum <= 0: return if root_label in seen:",
"nodes in the proof tree.\"\"\" return 1 + sum(len(c) for c in self.children)",
"root_node = Node(root) queue = deque([root_node]) while queue: v = queue.popleft() rule =",
"rules_dict is assumed to be pruned. \"\"\" def _dfs_tree( root_label: int, seen: FrozenSet[int],",
"a small specification. Spends minimization_time_limit seconds searching. \"\"\" start_time = time.time() smallest_so_far =",
"rule_set.remove(rule) changed = True if not rule_set: del rdict[k] def iterative_prune(rules_dict: RulesDict, root:",
"for rule in list(rule_set): if any(x not in rdict for x in rule):",
"res.update(chain.from_iterable(node.labels() for node in self.children)) return res def nodes(self) -> Iterator[\"Node\"]: \"\"\"Yield all",
"in self.children)) return res def nodes(self) -> Iterator[\"Node\"]: \"\"\"Yield all nodes in the",
"= Node(root_label) root_node.children = children yield root_node sorted_rules_dict = { start: tuple(sorted(ends)) for",
"_dfs_forest( root_labels: Sequence[int], seen: FrozenSet[int], maximum: Optional[int] = None ) -> Iterator[Tuple[FrozenSet[int], List[Node]]]:",
"_dfs_tree(root, frozenset(), maximum): yield tree def iterative_proof_tree_finder(rules_dict: RulesDict, root: int) -> Node: \"\"\"Finds",
"seen = set() seen = seen.copy() if root in rules_dict: rule_set = rules_dict[root]",
"class Node: \"\"\"A node for a proof tree.\"\"\" def __init__(self, n: int, children:",
"if not (v.label in seen or rule == ()): children = [Node(i) for",
"changes rdict in place. \"\"\" changed = True while changed: changed = False",
"seen: FrozenSet[int]): if root_label in seen: yield Node(root_label) return next_seen = seen.union((root_label,)) for",
"rules_dict is assumed to be pruned. \"\"\" def _bfs_helper(root_label: int, seen: FrozenSet[int]): if",
"[] root, roots = roots[0], roots[1:] seen1, tree = proof_tree_dfs(rules_dict, root, seen) seen2,",
"in seen: yield Node(root_label) return next_seen = seen.union((root_label,)) for rule in rules_dict[root_label]: for",
"() in rule_set: seen.add(root) return seen, root_node seen.add(root) rule = choice(list(rule_set)) visited, trees",
"N.B. The rules_dict is assumed to be pruned. \"\"\" def _dfs_tree( root_label: int,",
"tree in _dfs_tree(root, frozenset(), maximum): yield tree def iterative_proof_tree_finder(rules_dict: RulesDict, root: int) ->",
"= len(next_tree) if next_tree_size < smallest_size: smallest_so_far = next_tree smallest_size = next_tree_size return",
"seen: FrozenSet[int], maximum: Optional[int] = None ) -> Iterator[Tuple[FrozenSet[int], List[Node]]]: if maximum is",
"rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) create_tree(k, rule) rdict[k].remove(rule) if not rule_set: del",
"if not root_labels: yield seen, [] else: root, roots = root_labels[0], root_labels[1:] new_max",
"-> Iterator[Tuple[FrozenSet[int], Node]]: if maximum is not None and maximum <= 0: return",
"if not child.label == root]) v.children = children return root_node def random_proof_tree(rules_dict: RulesDict,",
"N.B. The rules_dict is assumed to be pruned. \"\"\" def _bfs_helper(root_label: int, seen:",
"for a small specification. Spends minimization_time_limit seconds searching. \"\"\" start_time = time.time() smallest_so_far",
"= children def labels(self) -> Set[int]: \"\"\"Return the set of all labels in",
"\"\"\" def _dfs_tree( root_label: int, seen: FrozenSet[int], maximum: int = None ) ->",
"search. N.B. The rules_dict is assumed to be pruned. \"\"\" def _dfs_tree( root_label:",
"generator for all proof trees using depth first search. N.B. The rules_dict is",
"self.label = n self.children = children def labels(self) -> Set[int]: \"\"\"Return the set",
"in place. \"\"\" changed = True while changed: changed = False for k,",
"trees return visited, root_node def all_proof_trees_dfs( rules_dict: RulesDict, roots: Sequence[int], seen: Optional[Set[int]] =",
"RulesDict, root: int) -> Node: \"\"\"Takes in a iterative pruned rules_dict and returns",
"def iterative_proof_tree_finder(rules_dict: RulesDict, root: int) -> Node: \"\"\"Finds an iterative proof tree for",
"call a proof tree. \"\"\" import time from collections import defaultdict, deque from",
"in sorted_rules_dict[root_label]: if rule == (): yield seen, Node(root_label) else: for new_seen, children",
"seen or () in rule_set: seen.add(root) return seen, root_node seen.add(root) rule = choice(list(rule_set))",
"Node: \"\"\"Finds an iterative proof tree for root, if one exists. \"\"\" trees:",
"None ) -> Iterator[Tuple[FrozenSet[int], List[Node]]]: if maximum is not None and maximum <=",
"True: changed = False for k, rule_set in list(rdict.items()): for rule in list(rule_set):",
"- len(root_labels) + 1 if maximum is not None else None for seen1,",
"seen, new_max): length = len(tree) new_maximum = maximum - length if maximum is",
"= seen.union((root_label,)) for rule in rules_dict[root_label]: for children in product( *[_bfs_helper(child_label, next_seen) for",
"+ sum(len(c) for c in self.children) def prune(rdict: RulesDict) -> None: \"\"\" Prune",
"rdict in place. \"\"\" changed = True while changed: changed = False for",
"yield seen1.union(seen2), [tree] + trees sorted_rules_dict = { start: tuple(sorted(ends)) for start, ends",
"Sequence[int], seen: FrozenSet[int], maximum: Optional[int] = None ) -> Iterator[Tuple[FrozenSet[int], List[Node]]]: if maximum",
"all nodes in the proof tree.\"\"\" yield self for node in self.children: yield",
"k, rule_set in list(rdict.items()): for rule in list(rule_set): if all(x in verified_labels for",
"roots[1:] seen1, tree = proof_tree_dfs(rules_dict, root, seen) seen2, trees = all_proof_trees_dfs(rules_dict, roots, seen1)",
"i in rule] queue.extend([child for child in children if not child.label == root])",
"seen or rule == ()): children = [Node(i) for i in rule] shuffle(children)",
"yield self for node in self.children: yield from node.nodes() def __str__(self) -> str:",
"= set() if not roots: return seen, [] root, roots = roots[0], roots[1:]",
"RulesDict, root: int) -> Iterator[Node]: \"\"\"A generator for all proof trees using breadth",
"get_tree(start): if start == root: return Node(start) if start in trees: return trees[start]",
"\"\"\"A generator for all proof trees using breadth first search. N.B. The rules_dict",
"FrozenSet[int], maximum: Optional[int] = None ) -> Iterator[Tuple[FrozenSet[int], List[Node]]]: if maximum is not",
"t in trees) if maximum is not None and actual_length < maximum: yield",
"by breadth first search.\"\"\" seen: Set[int] = set() root_node = Node(root) queue =",
"for i in rule] shuffle(children) queue.extend(children) v.children = children seen.add(v.label) return root_node def",
"= roots[0], roots[1:] seen1, tree = proof_tree_dfs(rules_dict, root, seen) seen2, trees = all_proof_trees_dfs(rules_dict,",
"minimization_time_limit: next_tree = random_proof_tree(rules_dict, root=root) next_tree_size = len(next_tree) if next_tree_size < smallest_size: smallest_so_far",
"in _dfs_forest(roots, seen1, new_maximum): actual_length = length + sum(len(t) for t in trees)",
"set() root_node = Node(root) queue = deque([root_node]) while queue: v = queue.popleft() rule",
"is not None and maximum <= 0: return if not root_labels: yield seen,",
"= Node(start) children = [get_tree(i) for i in end] root.children = children trees[start]",
"\"\"\"Return all labels which have been seen, together with all of the trees",
"v = queue.popleft() rule = sorted(rules_dict[v.label])[0] if not rule == (): children =",
"seen1.union(seen2), [tree] + trees sorted_rules_dict = { start: tuple(sorted(ends)) for start, ends in",
"Node] = {} def get_tree(start): if start == root: return Node(start) if start",
"1 if maximum is not None else None for seen1, tree in _dfs_tree(root,",
"\"\"\"Yield all nodes in the proof tree.\"\"\" yield self for node in self.children:",
"in trees: return trees[start] raise KeyError(\"{} is not in trees\".format(start)) def create_tree(start, end):",
"-> Node: \"\"\" Searches a rule_dict known to contain at least one specification",
"given roots..\"\"\" if seen is None: seen = set() if not roots: return",
"child_label in rule] ): root_node = Node(root_label) root_node.children = children yield root_node sorted_rules_dict",
"rules_dict.items() } if root in sorted_rules_dict: for _, tree in _dfs_tree(root, frozenset(), maximum):",
"< smallest_size: smallest_so_far = next_tree smallest_size = next_tree_size return smallest_so_far def proof_tree_generator_bfs(rules_dict: RulesDict,",
"for t in trees) if maximum is not None and actual_length < maximum:",
"which have been seen, together with all of the trees using the given",
"} if root in sorted_rules_dict: yield from _bfs_helper(root, frozenset()) def proof_tree_generator_dfs( rules_dict: RulesDict,",
"None and maximum <= 0: return if not root_labels: yield seen, [] else:",
"len(root_labels) + 1 if maximum is not None else None for seen1, tree",
"_dfs_forest(roots, seen1, new_maximum): actual_length = length + sum(len(t) for t in trees) if",
"minimization_time_limit: float ) -> Node: \"\"\" Searches a rule_dict known to contain at",
"(\"prune\", \"proof_tree_generator_dfs\", \"proof_tree_generator_bfs\") RulesDict = Dict[int, Set[Tuple[int, ...]]] class Node: \"\"\"A node for",
"queue = deque([root_node]) while queue: v = queue.popleft() rule = sorted(rules_dict[v.label])[0] if not",
"None: seen = set() if not roots: return seen, [] root, roots =",
"RulesDict = Dict[int, Set[Tuple[int, ...]]] class Node: \"\"\"A node for a proof tree.\"\"\"",
"+ 1 if maximum is not None else None for seen1, tree in",
"None ) -> Iterator[Tuple[FrozenSet[int], Node]]: if maximum is not None and maximum <=",
"first search. N.B. The rules_dict is assumed to be pruned. \"\"\" def _bfs_helper(root_label:",
"rule in list(rule_set): if any(x not in rdict for x in rule): rule_set.remove(rule)",
"for node in self.children)) return res def nodes(self) -> Iterator[\"Node\"]: \"\"\"Yield all nodes",
"seen2, trees = all_proof_trees_dfs(rules_dict, roots, seen1) return seen1.union(seen2), [tree] + trees def iterative_proof_tree_bfs(rules_dict:",
"time.time() - start_time < minimization_time_limit: next_tree = random_proof_tree(rules_dict, root=root) next_tree_size = len(next_tree) if",
"returns a combinatorial specification, that we call a proof tree. \"\"\" import time",
"copy import deepcopy from itertools import chain, product from random import choice, shuffle",
"= None): if children is None: children = [] self.label = n self.children",
"Node(start) children = [get_tree(i) for i in end] root.children = children trees[start] =",
"def all_proof_trees_dfs( rules_dict: RulesDict, roots: Sequence[int], seen: Optional[Set[int]] = None ) -> Tuple[Set[int],",
"queue = deque([root_node]) while queue: v = queue.popleft() rule = choice(list(rules_dict[v.label])) if not",
"in trees\".format(start)) def create_tree(start, end): if start in trees: return root = Node(start)",
"while True: changed = False for k, rule_set in list(rdict.items()): for rule in",
"maximum: int = None ) -> Iterator[Tuple[FrozenSet[int], Node]]: if maximum is not None",
"rule_dict known to contain at least one specification for a small specification. Spends",
"in rule): rule_set.remove(rule) changed = True if not rule_set: del rdict[k] def iterative_prune(rules_dict:",
"all_proof_trees_dfs( rules_dict: RulesDict, roots: Sequence[int], seen: Optional[Set[int]] = None ) -> Tuple[Set[int], List[Node]]:",
"seen2, trees in _dfs_forest(roots, seen1, new_maximum): actual_length = length + sum(len(t) for t",
"not rule_set: del rdict[k] def iterative_prune(rules_dict: RulesDict, root: Optional[int] = None) -> RulesDict:",
"= seen.copy() if root in rules_dict: rule_set = rules_dict[root] root_node = Node(root) if",
"= rules_dict[root] root_node = Node(root) if root in seen or () in rule_set:",
"rdict = deepcopy(rules_dict) new_rules_dict: RulesDict = defaultdict(set) while True: changed = False for",
"seen: yield seen, Node(root_label) return seen = seen.union((root_label,)) for rule in sorted_rules_dict[root_label]: if",
"tree.\"\"\" root_node = Node(root) queue = deque([root_node]) while queue: v = queue.popleft() rule",
"tree. \"\"\" import time from collections import defaultdict, deque from copy import deepcopy",
"if all(x in verified_labels for x in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule)",
"if root in seen or () in rule_set: seen.add(root) return seen, root_node seen.add(root)",
"search.\"\"\" seen: Set[int] = set() root_node = Node(root) queue = deque([root_node]) while queue:",
"rule, seen) root_node.children = trees return visited, root_node def all_proof_trees_dfs( rules_dict: RulesDict, roots:",
"seen: Set[int] = set() root_node = Node(root) queue = deque([root_node]) while queue: v",
"Dict[int, Set[Tuple[int, ...]]] class Node: \"\"\"A node for a proof tree.\"\"\" def __init__(self,",
"()): children = [Node(i) for i in rule] shuffle(children) queue.extend(children) v.children = children",
"__len__(self) -> int: \"\"\"Return the number nodes in the proof tree.\"\"\" return 1",
"(): children = [Node(i) for i in rule] queue.extend([child for child in children",
"in rule_set: seen.add(root) return seen, root_node seen.add(root) rule = choice(list(rule_set)) visited, trees =",
"\"\".join([\"(\", str(self.label), *map(str, self.children), \")\"]) def __len__(self) -> int: \"\"\"Return the number nodes",
"del rdict[k] if not changed: break if root in trees: return trees[root] raise",
"while queue: v = queue.popleft() rule = sorted(rules_dict[v.label])[0] if not rule == ():",
"proof trees using breadth first search. N.B. The rules_dict is assumed to be",
"import Dict, FrozenSet, Iterator, List, Optional, Sequence, Set, Tuple __all__ = (\"prune\", \"proof_tree_generator_dfs\",",
"[tree] + trees def iterative_proof_tree_bfs(rules_dict: RulesDict, root: int) -> Node: \"\"\"Takes in a",
"if any(x not in rdict for x in rule): rule_set.remove(rule) changed = True",
"tuple(sorted(ends)) for start, ends in rules_dict.items() } if root in sorted_rules_dict: yield from",
"in sorted_rules_dict: for _, tree in _dfs_tree(root, frozenset(), maximum): yield tree def iterative_proof_tree_finder(rules_dict:",
"This changes rdict in place. \"\"\" changed = True while changed: changed =",
"Optional, Sequence, Set, Tuple __all__ = (\"prune\", \"proof_tree_generator_dfs\", \"proof_tree_generator_bfs\") RulesDict = Dict[int, Set[Tuple[int,",
"proof trees using depth first search. N.B. The rules_dict is assumed to be",
"Node(root_label) return seen = seen.union((root_label,)) for rule in sorted_rules_dict[root_label]: if rule == ():",
"trees: return trees[start] raise KeyError(\"{} is not in trees\".format(start)) def create_tree(start, end): if",
"return visited, root_node def all_proof_trees_dfs( rules_dict: RulesDict, roots: Sequence[int], seen: Optional[Set[int]] = None",
"changed: break if root in trees: return trees[root] raise ValueError(\"{} has no tree",
"children trees[start] = root verified_labels = set() if root is not None: verified_labels.add(root)",
"seen1.union(seen2), [tree] + trees def iterative_proof_tree_bfs(rules_dict: RulesDict, root: int) -> Node: \"\"\"Takes in",
"tree.\"\"\" res = set([self.label]) res.update(chain.from_iterable(node.labels() for node in self.children)) return res def nodes(self)",
"= root verified_labels = set() if root is not None: verified_labels.add(root) rdict =",
"a combinatorial specification. This changes rdict in place. \"\"\" changed = True while",
"int, minimization_time_limit: float ) -> Node: \"\"\" Searches a rule_dict known to contain",
"None else None for seen1, tree in _dfs_tree(root, seen, new_max): length = len(tree)",
"children = [get_tree(i) for i in end] root.children = children trees[start] = root",
"for seen1, tree in _dfs_tree(root, seen, new_max): length = len(tree) new_maximum = maximum",
"iterative_proof_tree_bfs(rules_dict: RulesDict, root: int) -> Node: \"\"\"Takes in a iterative pruned rules_dict and",
"break if root in trees: return trees[root] raise ValueError(\"{} has no tree in",
"Node: \"\"\"Return random tree found by breadth first search.\"\"\" seen: Set[int] = set()",
"rdict[k] if not changed: break if root in trees: return trees[root] raise ValueError(\"{}",
"in self.children) def prune(rdict: RulesDict) -> None: \"\"\" Prune all nodes not in",
"proof tree.\"\"\" yield self for node in self.children: yield from node.nodes() def __str__(self)",
"by depth first search.\"\"\" if seen is None: seen = set() seen =",
"False for k, rule_set in list(rdict.items()): for rule in list(rule_set): if all(x in",
"deepcopy from itertools import chain, product from random import choice, shuffle from typing",
"root in sorted_rules_dict: yield from _bfs_helper(root, frozenset()) def proof_tree_generator_dfs( rules_dict: RulesDict, root: int,",
"in list(rule_set): if all(x in verified_labels for x in rule): changed = True",
"proof_tree_dfs(rules_dict: RulesDict, root: int, seen: Optional[Set[int]] = None): \"\"\"Return random proof tree found",
"verified_labels.add(root) rdict = deepcopy(rules_dict) new_rules_dict: RulesDict = defaultdict(set) while True: changed = False",
"or () in rule_set: seen.add(root) return seen, root_node seen.add(root) rule = choice(list(rule_set)) visited,",
"root]) v.children = children return root_node def random_proof_tree(rules_dict: RulesDict, root: int) -> Node:",
"= random_proof_tree(rules_dict, root=root) next_tree_size = len(next_tree) if next_tree_size < smallest_size: smallest_so_far = next_tree",
"not in trees\".format(start)) def create_tree(start, end): if start in trees: return root =",
"verified_labels.add(k) new_rules_dict[k].add(rule) rdict[k].remove(rule) if not rule_set: del rdict[k] if not changed: break return",
"return root_node def smallish_random_proof_tree( rules_dict: RulesDict, root: int, minimization_time_limit: float ) -> Node:",
"rule_set = rules_dict[root] root_node = Node(root) if root in seen or () in",
"children yield new_seen, root_node def _dfs_forest( root_labels: Sequence[int], seen: FrozenSet[int], maximum: Optional[int] =",
"visited, trees = all_proof_trees_dfs(rules_dict, rule, seen) root_node.children = trees return visited, root_node def",
"\"\"\" Prune all nodes not in a combinatorial specification. This changes rdict in",
"breadth first search. N.B. The rules_dict is assumed to be pruned. \"\"\" def",
"next_seen) for child_label in rule] ): root_node = Node(root_label) root_node.children = children yield",
"-> Node: \"\"\"Finds an iterative proof tree for root, if one exists. \"\"\"",
"in sorted_rules_dict: yield from _bfs_helper(root, frozenset()) def proof_tree_generator_dfs( rules_dict: RulesDict, root: int, maximum:",
"in _dfs_tree(root, seen, new_max): length = len(tree) new_maximum = maximum - length if",
"None): if children is None: children = [] self.label = n self.children =",
"maximum - length if maximum is not None else None for seen2, trees",
") -> Iterator[Node]: \"\"\"A generator for all proof trees using depth first search.",
"None): \"\"\"Return random proof tree found by depth first search.\"\"\" if seen is",
"\"\"\" start_time = time.time() smallest_so_far = random_proof_tree(rules_dict, root=root) smallest_size = len(smallest_so_far) while time.time()",
"root: int) -> Node: \"\"\"Finds an iterative proof tree for root, if one",
"be pruned. \"\"\" def _bfs_helper(root_label: int, seen: FrozenSet[int]): if root_label in seen: yield",
"def smallish_random_proof_tree( rules_dict: RulesDict, root: int, minimization_time_limit: float ) -> Node: \"\"\" Searches",
"seen: Optional[Set[int]] = None): \"\"\"Return random proof tree found by depth first search.\"\"\"",
"typing import Dict, FrozenSet, Iterator, List, Optional, Sequence, Set, Tuple __all__ = (\"prune\",",
"in seen: yield seen, Node(root_label) return seen = seen.union((root_label,)) for rule in sorted_rules_dict[root_label]:",
"return next_seen = seen.union((root_label,)) for rule in rules_dict[root_label]: for children in product( *[_bfs_helper(child_label,",
"if seen is None: seen = set() if not roots: return seen, []",
"rule = choice(list(rule_set)) visited, trees = all_proof_trees_dfs(rules_dict, rule, seen) root_node.children = trees return",
"= random_proof_tree(rules_dict, root=root) smallest_size = len(smallest_so_far) while time.time() - start_time < minimization_time_limit: next_tree",
"to be pruned. \"\"\" def _bfs_helper(root_label: int, seen: FrozenSet[int]): if root_label in seen:",
"if maximum is not None and maximum <= 0: return if root_label in",
"if maximum is not None and actual_length < maximum: yield seen1.union(seen2), [tree] +",
"= proof_tree_dfs(rules_dict, root, seen) seen2, trees = all_proof_trees_dfs(rules_dict, roots, seen1) return seen1.union(seen2), [tree]",
"= children yield new_seen, root_node def _dfs_forest( root_labels: Sequence[int], seen: FrozenSet[int], maximum: Optional[int]",
"root_labels[0], root_labels[1:] new_max = maximum - len(root_labels) + 1 if maximum is not",
"sorted_rules_dict[root_label]: if rule == (): yield seen, Node(root_label) else: for new_seen, children in",
"random import choice, shuffle from typing import Dict, FrozenSet, Iterator, List, Optional, Sequence,",
"iterative proof tree.\"\"\" root_node = Node(root) queue = deque([root_node]) while queue: v =",
"all nodes not in a combinatorial specification. This changes rdict in place. \"\"\"",
"if not rule_set: del rdict[k] def iterative_prune(rules_dict: RulesDict, root: Optional[int] = None) ->",
"roots: return seen, [] root, roots = roots[0], roots[1:] seen1, tree = proof_tree_dfs(rules_dict,",
"root_labels: yield seen, [] else: root, roots = root_labels[0], root_labels[1:] new_max = maximum",
"return if not root_labels: yield seen, [] else: root, roots = root_labels[0], root_labels[1:]",
"all labels which have been seen, together with all of the trees using",
"not None: verified_labels.add(root) rdict = deepcopy(rules_dict) new_rules_dict: RulesDict = defaultdict(set) while True: changed",
"tree.\"\"\" def __init__(self, n: int, children: Optional[List[\"Node\"]] = None): if children is None:",
"if children is None: children = [] self.label = n self.children = children",
"roots[0], roots[1:] seen1, tree = proof_tree_dfs(rules_dict, root, seen) seen2, trees = all_proof_trees_dfs(rules_dict, roots,",
"== (): yield seen, Node(root_label) else: for new_seen, children in _dfs_forest(rule, seen, maximum):",
"rule in rules_dict[root_label]: for children in product( *[_bfs_helper(child_label, next_seen) for child_label in rule]",
"in rules_dict.items() } if root in sorted_rules_dict: for _, tree in _dfs_tree(root, frozenset(),",
"= root_labels[0], root_labels[1:] new_max = maximum - len(root_labels) + 1 if maximum is",
"place. \"\"\" changed = True while changed: changed = False for k, rule_set",
"changed: break return new_rules_dict def proof_tree_dfs(rules_dict: RulesDict, root: int, seen: Optional[Set[int]] = None):",
"Node(start) if start in trees: return trees[start] raise KeyError(\"{} is not in trees\".format(start))",
"- start_time < minimization_time_limit: next_tree = random_proof_tree(rules_dict, root=root) next_tree_size = len(next_tree) if next_tree_size",
"def create_tree(start, end): if start in trees: return root = Node(start) children =",
"for start, ends in rules_dict.items() } if root in sorted_rules_dict: for _, tree",
"children: Optional[List[\"Node\"]] = None): if children is None: children = [] self.label =",
"not root_labels: yield seen, [] else: root, roots = root_labels[0], root_labels[1:] new_max =",
"combinatorial specification, that we call a proof tree. \"\"\" import time from collections",
"smallest_size = next_tree_size return smallest_so_far def proof_tree_generator_bfs(rules_dict: RulesDict, root: int) -> Iterator[Node]: \"\"\"A",
"verified_labels for x in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) create_tree(k, rule) rdict[k].remove(rule)",
"_bfs_helper(root_label: int, seen: FrozenSet[int]): if root_label in seen: yield Node(root_label) return next_seen =",
"node.nodes() def __str__(self) -> str: return \"\".join([\"(\", str(self.label), *map(str, self.children), \")\"]) def __len__(self)",
"an iterative proof tree for root, if one exists. \"\"\" trees: Dict[int, Node]",
"rule] shuffle(children) queue.extend(children) v.children = children seen.add(v.label) return root_node def smallish_random_proof_tree( rules_dict: RulesDict,",
"= set([self.label]) res.update(chain.from_iterable(node.labels() for node in self.children)) return res def nodes(self) -> Iterator[\"Node\"]:",
"seen, Node(root_label) else: for new_seen, children in _dfs_forest(rule, seen, maximum): root_node = Node(root_label)",
"exists. \"\"\" trees: Dict[int, Node] = {} def get_tree(start): if start == root:",
"\"\"\"Takes in a iterative pruned rules_dict and returns iterative proof tree.\"\"\" root_node =",
"and returns a combinatorial specification, that we call a proof tree. \"\"\" import",
"using the given roots..\"\"\" if seen is None: seen = set() if not",
"root_node.children = trees return visited, root_node def all_proof_trees_dfs( rules_dict: RulesDict, roots: Sequence[int], seen:",
"queue: v = queue.popleft() rule = choice(list(rules_dict[v.label])) if not (v.label in seen or",
"root_node.children = children yield root_node sorted_rules_dict = { start: tuple(sorted(ends)) for start, ends",
"maximum): yield tree def iterative_proof_tree_finder(rules_dict: RulesDict, root: int) -> Node: \"\"\"Finds an iterative",
"not changed: break if root in trees: return trees[root] raise ValueError(\"{} has no",
"None: \"\"\" Prune all nodes not in a combinatorial specification. This changes rdict",
"maximum is not None and maximum <= 0: return if not root_labels: yield",
"= [] self.label = n self.children = children def labels(self) -> Set[int]: \"\"\"Return",
"root=root) next_tree_size = len(next_tree) if next_tree_size < smallest_size: smallest_so_far = next_tree smallest_size =",
"seen.add(root) return seen, root_node seen.add(root) rule = choice(list(rule_set)) visited, trees = all_proof_trees_dfs(rules_dict, rule,",
"to contain at least one specification for a small specification. Spends minimization_time_limit seconds",
"proof tree for root, if one exists. \"\"\" trees: Dict[int, Node] = {}",
"queue.popleft() rule = choice(list(rules_dict[v.label])) if not (v.label in seen or rule == ()):",
"import time from collections import defaultdict, deque from copy import deepcopy from itertools",
"return smallest_so_far def proof_tree_generator_bfs(rules_dict: RulesDict, root: int) -> Iterator[Node]: \"\"\"A generator for all",
"if root in sorted_rules_dict: yield from _bfs_helper(root, frozenset()) def proof_tree_generator_dfs( rules_dict: RulesDict, root:",
"small specification. Spends minimization_time_limit seconds searching. \"\"\" start_time = time.time() smallest_so_far = random_proof_tree(rules_dict,",
"== root: return Node(start) if start in trees: return trees[start] raise KeyError(\"{} is",
"root, seen) seen2, trees = all_proof_trees_dfs(rules_dict, roots, seen1) return seen1.union(seen2), [tree] + trees",
"if not changed: break if root in trees: return trees[root] raise ValueError(\"{} has",
"Prune all nodes not in a combinatorial specification. This changes rdict in place.",
"seen is None: seen = set() if not roots: return seen, [] root,",
"def __len__(self) -> int: \"\"\"Return the number nodes in the proof tree.\"\"\" return",
"rule == (): children = [Node(i) for i in rule] queue.extend([child for child",
"root_node def all_proof_trees_dfs( rules_dict: RulesDict, roots: Sequence[int], seen: Optional[Set[int]] = None ) ->",
"trees) if maximum is not None and actual_length < maximum: yield seen1.union(seen2), [tree]",
"node for a proof tree.\"\"\" def __init__(self, n: int, children: Optional[List[\"Node\"]] = None):",
"using depth first search. N.B. The rules_dict is assumed to be pruned. \"\"\"",
"children return root_node def random_proof_tree(rules_dict: RulesDict, root: int) -> Node: \"\"\"Return random tree",
"start, ends in rules_dict.items() } if root in sorted_rules_dict: for _, tree in",
"sorted_rules_dict: for _, tree in _dfs_tree(root, frozenset(), maximum): yield tree def iterative_proof_tree_finder(rules_dict: RulesDict,",
"True while changed: changed = False for k, rule_set in list(rdict.items()): for rule",
"[] self.label = n self.children = children def labels(self) -> Set[int]: \"\"\"Return the",
"specification for a small specification. Spends minimization_time_limit seconds searching. \"\"\" start_time = time.time()",
"not None and maximum <= 0: return if root_label in seen: yield seen,",
"rule_set: del rdict[k] def iterative_prune(rules_dict: RulesDict, root: Optional[int] = None) -> RulesDict: \"\"\"Prune",
"= Node(root_label) root_node.children = children yield new_seen, root_node def _dfs_forest( root_labels: Sequence[int], seen:",
"roots..\"\"\" if seen is None: seen = set() if not roots: return seen,",
"proof tree found by depth first search.\"\"\" if seen is None: seen =",
"rule] ): root_node = Node(root_label) root_node.children = children yield root_node sorted_rules_dict = {",
"rdict[k].remove(rule) if not rule_set: del rdict[k] if not changed: break return new_rules_dict def",
"with all of the trees using the given roots..\"\"\" if seen is None:",
"minimization_time_limit seconds searching. \"\"\" start_time = time.time() smallest_so_far = random_proof_tree(rules_dict, root=root) smallest_size =",
"root, roots = root_labels[0], root_labels[1:] new_max = maximum - len(root_labels) + 1 if",
"set() if root is not None: verified_labels.add(root) rdict = deepcopy(rules_dict) new_rules_dict: RulesDict =",
"seen1) return seen1.union(seen2), [tree] + trees def iterative_proof_tree_bfs(rules_dict: RulesDict, root: int) -> Node:",
"using breadth first search. N.B. The rules_dict is assumed to be pruned. \"\"\"",
"maximum <= 0: return if root_label in seen: yield seen, Node(root_label) return seen",
"Dict[int, Node] = {} def get_tree(start): if start == root: return Node(start) if",
"iterative proof tree for root, if one exists. \"\"\" trees: Dict[int, Node] =",
"< minimization_time_limit: next_tree = random_proof_tree(rules_dict, root=root) next_tree_size = len(next_tree) if next_tree_size < smallest_size:",
"or rule == ()): children = [Node(i) for i in rule] shuffle(children) queue.extend(children)",
"maximum <= 0: return if not root_labels: yield seen, [] else: root, roots",
"all nodes not iteratively verifiable.\"\"\" verified_labels: Set[int] = set() if root is not",
"combinatorial specification. This changes rdict in place. \"\"\" changed = True while changed:",
"Node: \"\"\"Takes in a iterative pruned rules_dict and returns iterative proof tree.\"\"\" root_node",
"def iterative_prune(rules_dict: RulesDict, root: Optional[int] = None) -> RulesDict: \"\"\"Prune all nodes not",
"choice, shuffle from typing import Dict, FrozenSet, Iterator, List, Optional, Sequence, Set, Tuple",
"True if not rule_set: del rdict[k] def iterative_prune(rules_dict: RulesDict, root: Optional[int] = None)",
"seen, maximum): root_node = Node(root_label) root_node.children = children yield new_seen, root_node def _dfs_forest(",
"== root]) v.children = children return root_node def random_proof_tree(rules_dict: RulesDict, root: int) ->",
"in _dfs_tree(root, frozenset(), maximum): yield tree def iterative_proof_tree_finder(rules_dict: RulesDict, root: int) -> Node:",
"rule_set in list(rdict.items()): for rule in list(rule_set): if any(x not in rdict for",
"in end] root.children = children trees[start] = root verified_labels = set() if root",
"int, seen: FrozenSet[int]): if root_label in seen: yield Node(root_label) return next_seen = seen.union((root_label,))",
"trees using breadth first search. N.B. The rules_dict is assumed to be pruned.",
"Tuple[Set[int], List[Node]]: \"\"\"Return all labels which have been seen, together with all of",
"\"\"\" trees: Dict[int, Node] = {} def get_tree(start): if start == root: return",
"in rule] queue.extend([child for child in children if not child.label == root]) v.children",
"changed = True while changed: changed = False for k, rule_set in list(rdict.items()):",
"rules_dict and returns iterative proof tree.\"\"\" root_node = Node(root) queue = deque([root_node]) while",
"RulesDict, root: int) -> Node: \"\"\"Return random tree found by breadth first search.\"\"\"",
"\"proof_tree_generator_dfs\", \"proof_tree_generator_bfs\") RulesDict = Dict[int, Set[Tuple[int, ...]]] class Node: \"\"\"A node for a",
"-> None: \"\"\" Prune all nodes not in a combinatorial specification. This changes",
"in a iterative pruned rules_dict and returns iterative proof tree.\"\"\" root_node = Node(root)",
"int) -> Node: \"\"\"Return random tree found by breadth first search.\"\"\" seen: Set[int]",
"labels in the proof tree.\"\"\" res = set([self.label]) res.update(chain.from_iterable(node.labels() for node in self.children))",
"Node(root) if root in seen or () in rule_set: seen.add(root) return seen, root_node",
"time.time() smallest_so_far = random_proof_tree(rules_dict, root=root) smallest_size = len(smallest_so_far) while time.time() - start_time <",
"the set of all labels in the proof tree.\"\"\" res = set([self.label]) res.update(chain.from_iterable(node.labels()",
"one specification for a small specification. Spends minimization_time_limit seconds searching. \"\"\" start_time =",
"else: root, roots = root_labels[0], root_labels[1:] new_max = maximum - len(root_labels) + 1",
"None ) -> Iterator[Node]: \"\"\"A generator for all proof trees using depth first",
"iterative pruned rules_dict and returns iterative proof tree.\"\"\" root_node = Node(root) queue =",
"seen = seen.copy() if root in rules_dict: rule_set = rules_dict[root] root_node = Node(root)",
"Optional[Set[int]] = None): \"\"\"Return random proof tree found by depth first search.\"\"\" if",
"def _dfs_forest( root_labels: Sequence[int], seen: FrozenSet[int], maximum: Optional[int] = None ) -> Iterator[Tuple[FrozenSet[int],",
"for rule in rules_dict[root_label]: for children in product( *[_bfs_helper(child_label, next_seen) for child_label in",
"+ sum(len(t) for t in trees) if maximum is not None and actual_length",
"= Dict[int, Set[Tuple[int, ...]]] class Node: \"\"\"A node for a proof tree.\"\"\" def",
"start: tuple(sorted(ends)) for start, ends in rules_dict.items() } if root in sorted_rules_dict: for",
"a combinatorial specification, that we call a proof tree. \"\"\" import time from",
"frozenset(), maximum): yield tree def iterative_proof_tree_finder(rules_dict: RulesDict, root: int) -> Node: \"\"\"Finds an",
"in list(rdict.items()): for rule in list(rule_set): if any(x not in rdict for x",
"trees sorted_rules_dict = { start: tuple(sorted(ends)) for start, ends in rules_dict.items() } if",
"start, ends in rules_dict.items() } if root in sorted_rules_dict: yield from _bfs_helper(root, frozenset())",
"for i in rule] queue.extend([child for child in children if not child.label ==",
"yield seen, Node(root_label) else: for new_seen, children in _dfs_forest(rule, seen, maximum): root_node =",
"maximum is not None else None for seen1, tree in _dfs_tree(root, seen, new_max):",
"tuple(sorted(ends)) for start, ends in rules_dict.items() } if root in sorted_rules_dict: for _,",
"queue: v = queue.popleft() rule = sorted(rules_dict[v.label])[0] if not rule == (): children",
"len(next_tree) if next_tree_size < smallest_size: smallest_so_far = next_tree smallest_size = next_tree_size return smallest_so_far",
"= False for k, rule_set in list(rdict.items()): for rule in list(rule_set): if any(x",
"root_labels[1:] new_max = maximum - len(root_labels) + 1 if maximum is not None",
"length if maximum is not None else None for seen2, trees in _dfs_forest(roots,",
"yield from node.nodes() def __str__(self) -> str: return \"\".join([\"(\", str(self.label), *map(str, self.children), \")\"])",
"have been seen, together with all of the trees using the given roots..\"\"\"",
"in rules_dict: rule_set = rules_dict[root] root_node = Node(root) if root in seen or",
"known to contain at least one specification for a small specification. Spends minimization_time_limit",
"Set, Tuple __all__ = (\"prune\", \"proof_tree_generator_dfs\", \"proof_tree_generator_bfs\") RulesDict = Dict[int, Set[Tuple[int, ...]]] class",
"rules_dict: rule_set = rules_dict[root] root_node = Node(root) if root in seen or ()",
"child in children if not child.label == root]) v.children = children return root_node",
"random_proof_tree(rules_dict: RulesDict, root: int) -> Node: \"\"\"Return random tree found by breadth first",
"Node(root) queue = deque([root_node]) while queue: v = queue.popleft() rule = choice(list(rules_dict[v.label])) if",
"int) -> Iterator[Node]: \"\"\"A generator for all proof trees using breadth first search.",
"in product( *[_bfs_helper(child_label, next_seen) for child_label in rule] ): root_node = Node(root_label) root_node.children",
"and actual_length < maximum: yield seen1.union(seen2), [tree] + trees sorted_rules_dict = { start:",
"} if root in sorted_rules_dict: for _, tree in _dfs_tree(root, frozenset(), maximum): yield",
"-> Tuple[Set[int], List[Node]]: \"\"\"Return all labels which have been seen, together with all",
"= queue.popleft() rule = sorted(rules_dict[v.label])[0] if not rule == (): children = [Node(i)",
"def _bfs_helper(root_label: int, seen: FrozenSet[int]): if root_label in seen: yield Node(root_label) return next_seen",
"one exists. \"\"\" trees: Dict[int, Node] = {} def get_tree(start): if start ==",
"trees using depth first search. N.B. The rules_dict is assumed to be pruned.",
"for x in rule): rule_set.remove(rule) changed = True if not rule_set: del rdict[k]",
"returns iterative proof tree.\"\"\" root_node = Node(root) queue = deque([root_node]) while queue: v",
"root in sorted_rules_dict: for _, tree in _dfs_tree(root, frozenset(), maximum): yield tree def",
"-> Set[int]: \"\"\"Return the set of all labels in the proof tree.\"\"\" res",
"(v.label in seen or rule == ()): children = [Node(i) for i in",
"Iterator, List, Optional, Sequence, Set, Tuple __all__ = (\"prune\", \"proof_tree_generator_dfs\", \"proof_tree_generator_bfs\") RulesDict =",
") -> Iterator[Tuple[FrozenSet[int], Node]]: if maximum is not None and maximum <= 0:",
"= { start: tuple(sorted(ends)) for start, ends in rules_dict.items() } if root in",
"Set[int] = set() root_node = Node(root) queue = deque([root_node]) while queue: v =",
"new_max = maximum - len(root_labels) + 1 if maximum is not None else",
"start == root: return Node(start) if start in trees: return trees[start] raise KeyError(\"{}",
"nodes(self) -> Iterator[\"Node\"]: \"\"\"Yield all nodes in the proof tree.\"\"\" yield self for",
"- length if maximum is not None else None for seen2, trees in",
"children in _dfs_forest(rule, seen, maximum): root_node = Node(root_label) root_node.children = children yield new_seen,",
"deque([root_node]) while queue: v = queue.popleft() rule = sorted(rules_dict[v.label])[0] if not rule ==",
"seen) root_node.children = trees return visited, root_node def all_proof_trees_dfs( rules_dict: RulesDict, roots: Sequence[int],",
"choice(list(rule_set)) visited, trees = all_proof_trees_dfs(rules_dict, rule, seen) root_node.children = trees return visited, root_node",
"_dfs_tree( root_label: int, seen: FrozenSet[int], maximum: int = None ) -> Iterator[Tuple[FrozenSet[int], Node]]:",
"root: int) -> Node: \"\"\"Return random tree found by breadth first search.\"\"\" seen:",
"del rdict[k] def iterative_prune(rules_dict: RulesDict, root: Optional[int] = None) -> RulesDict: \"\"\"Prune all",
"start in trees: return trees[start] raise KeyError(\"{} is not in trees\".format(start)) def create_tree(start,",
"= set() if root is not None: verified_labels.add(root) rdict = deepcopy(rules_dict) new_rules_dict: RulesDict",
"rule in sorted_rules_dict[root_label]: if rule == (): yield seen, Node(root_label) else: for new_seen,",
"Set[Tuple[int, ...]]] class Node: \"\"\"A node for a proof tree.\"\"\" def __init__(self, n:",
"RulesDict) -> None: \"\"\" Prune all nodes not in a combinatorial specification. This",
"is not None and maximum <= 0: return if root_label in seen: yield",
"generator for all proof trees using breadth first search. N.B. The rules_dict is",
"the proof tree.\"\"\" res = set([self.label]) res.update(chain.from_iterable(node.labels() for node in self.children)) return res",
"= seen.union((root_label,)) for rule in sorted_rules_dict[root_label]: if rule == (): yield seen, Node(root_label)",
"from copy import deepcopy from itertools import chain, product from random import choice,",
"if root_label in seen: yield Node(root_label) return next_seen = seen.union((root_label,)) for rule in",
"= deque([root_node]) while queue: v = queue.popleft() rule = choice(list(rules_dict[v.label])) if not (v.label",
"False for k, rule_set in list(rdict.items()): for rule in list(rule_set): if any(x not",
"trees: Dict[int, Node] = {} def get_tree(start): if start == root: return Node(start)",
"any(x not in rdict for x in rule): rule_set.remove(rule) changed = True if",
"= n self.children = children def labels(self) -> Set[int]: \"\"\"Return the set of",
"sum(len(c) for c in self.children) def prune(rdict: RulesDict) -> None: \"\"\" Prune all",
"root, if one exists. \"\"\" trees: Dict[int, Node] = {} def get_tree(start): if",
"def proof_tree_generator_bfs(rules_dict: RulesDict, root: int) -> Iterator[Node]: \"\"\"A generator for all proof trees",
"__all__ = (\"prune\", \"proof_tree_generator_dfs\", \"proof_tree_generator_bfs\") RulesDict = Dict[int, Set[Tuple[int, ...]]] class Node: \"\"\"A",
"\")\"]) def __len__(self) -> int: \"\"\"Return the number nodes in the proof tree.\"\"\"",
"None: verified_labels.add(root) rdict = deepcopy(rules_dict) new_rules_dict: RulesDict = defaultdict(set) while True: changed =",
"in rule] shuffle(children) queue.extend(children) v.children = children seen.add(v.label) return root_node def smallish_random_proof_tree( rules_dict:",
"for root, if one exists. \"\"\" trees: Dict[int, Node] = {} def get_tree(start):",
"if seen is None: seen = set() seen = seen.copy() if root in",
"all(x in verified_labels for x in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) create_tree(k,",
"root is not None: verified_labels.add(root) rdict = deepcopy(rules_dict) new_rules_dict: RulesDict = defaultdict(set) while",
"rules_dict.items() } if root in sorted_rules_dict: yield from _bfs_helper(root, frozenset()) def proof_tree_generator_dfs( rules_dict:",
"defaultdict(set) while True: changed = False for k, rule_set in list(rdict.items()): for rule",
"next_tree_size < smallest_size: smallest_so_far = next_tree smallest_size = next_tree_size return smallest_so_far def proof_tree_generator_bfs(rules_dict:",
"random_proof_tree(rules_dict, root=root) smallest_size = len(smallest_so_far) while time.time() - start_time < minimization_time_limit: next_tree =",
"next_tree smallest_size = next_tree_size return smallest_so_far def proof_tree_generator_bfs(rules_dict: RulesDict, root: int) -> Iterator[Node]:",
"maximum: yield seen1.union(seen2), [tree] + trees sorted_rules_dict = { start: tuple(sorted(ends)) for start,",
"= children trees[start] = root verified_labels = set() if root is not None:",
"rdict[k] def iterative_prune(rules_dict: RulesDict, root: Optional[int] = None) -> RulesDict: \"\"\"Prune all nodes",
"changed = False for k, rule_set in list(rdict.items()): for rule in list(rule_set): if",
"RulesDict: \"\"\"Prune all nodes not iteratively verifiable.\"\"\" verified_labels: Set[int] = set() if root",
"\"\"\"A node for a proof tree.\"\"\" def __init__(self, n: int, children: Optional[List[\"Node\"]] =",
"RulesDict, root: int, minimization_time_limit: float ) -> Node: \"\"\" Searches a rule_dict known",
"seen: yield Node(root_label) return next_seen = seen.union((root_label,)) for rule in rules_dict[root_label]: for children",
"else None for seen2, trees in _dfs_forest(roots, seen1, new_maximum): actual_length = length +",
"for seen2, trees in _dfs_forest(roots, seen1, new_maximum): actual_length = length + sum(len(t) for",
"= sorted(rules_dict[v.label])[0] if not rule == (): children = [Node(i) for i in",
"-> Iterator[Node]: \"\"\"A generator for all proof trees using depth first search. N.B.",
"int, seen: FrozenSet[int], maximum: int = None ) -> Iterator[Tuple[FrozenSet[int], Node]]: if maximum",
"tree.\"\"\" yield self for node in self.children: yield from node.nodes() def __str__(self) ->",
"FrozenSet[int], maximum: int = None ) -> Iterator[Tuple[FrozenSet[int], Node]]: if maximum is not",
"return \"\".join([\"(\", str(self.label), *map(str, self.children), \")\"]) def __len__(self) -> int: \"\"\"Return the number",
"for node in self.children: yield from node.nodes() def __str__(self) -> str: return \"\".join([\"(\",",
"= [Node(i) for i in rule] queue.extend([child for child in children if not",
"_bfs_helper(root, frozenset()) def proof_tree_generator_dfs( rules_dict: RulesDict, root: int, maximum: Optional[int] = None )",
"True verified_labels.add(k) new_rules_dict[k].add(rule) rdict[k].remove(rule) if not rule_set: del rdict[k] if not changed: break",
"if start == root: return Node(start) if start in trees: return trees[start] raise",
"trees: return root = Node(start) children = [get_tree(i) for i in end] root.children",
"searching. \"\"\" start_time = time.time() smallest_so_far = random_proof_tree(rules_dict, root=root) smallest_size = len(smallest_so_far) while",
"return seen, root_node seen.add(root) rule = choice(list(rule_set)) visited, trees = all_proof_trees_dfs(rules_dict, rule, seen)",
"Node(root_label) root_node.children = children yield new_seen, root_node def _dfs_forest( root_labels: Sequence[int], seen: FrozenSet[int],",
"= set() seen = seen.copy() if root in rules_dict: rule_set = rules_dict[root] root_node",
"for i in end] root.children = children trees[start] = root verified_labels = set()",
"None else None for seen2, trees in _dfs_forest(roots, seen1, new_maximum): actual_length = length",
"actual_length < maximum: yield seen1.union(seen2), [tree] + trees sorted_rules_dict = { start: tuple(sorted(ends))",
"if root_label in seen: yield seen, Node(root_label) return seen = seen.union((root_label,)) for rule",
"seen, root_node seen.add(root) rule = choice(list(rule_set)) visited, trees = all_proof_trees_dfs(rules_dict, rule, seen) root_node.children",
"x in rule): rule_set.remove(rule) changed = True if not rule_set: del rdict[k] def",
"children seen.add(v.label) return root_node def smallish_random_proof_tree( rules_dict: RulesDict, root: int, minimization_time_limit: float )",
"not rule_set: del rdict[k] if not changed: break if root in trees: return",
"return trees[start] raise KeyError(\"{} is not in trees\".format(start)) def create_tree(start, end): if start",
"[get_tree(i) for i in end] root.children = children trees[start] = root verified_labels =",
"smallest_so_far = next_tree smallest_size = next_tree_size return smallest_so_far def proof_tree_generator_bfs(rules_dict: RulesDict, root: int)",
"_, tree in _dfs_tree(root, frozenset(), maximum): yield tree def iterative_proof_tree_finder(rules_dict: RulesDict, root: int)",
"chain, product from random import choice, shuffle from typing import Dict, FrozenSet, Iterator,",
"product from random import choice, shuffle from typing import Dict, FrozenSet, Iterator, List,",
"node in self.children: yield from node.nodes() def __str__(self) -> str: return \"\".join([\"(\", str(self.label),",
"roots, seen1) return seen1.union(seen2), [tree] + trees def iterative_proof_tree_bfs(rules_dict: RulesDict, root: int) ->",
"from collections import defaultdict, deque from copy import deepcopy from itertools import chain,",
"= deepcopy(rules_dict) new_rules_dict: RulesDict = defaultdict(set) while True: changed = False for k,",
"if not rule == (): children = [Node(i) for i in rule] queue.extend([child",
"len(tree) new_maximum = maximum - length if maximum is not None else None",
"first search. N.B. The rules_dict is assumed to be pruned. \"\"\" def _dfs_tree(",
"least one specification for a small specification. Spends minimization_time_limit seconds searching. \"\"\" start_time",
"int) -> Node: \"\"\"Finds an iterative proof tree for root, if one exists.",
"return Node(start) if start in trees: return trees[start] raise KeyError(\"{} is not in",
"in trees: return root = Node(start) children = [get_tree(i) for i in end]",
"return root_node def random_proof_tree(rules_dict: RulesDict, root: int) -> Node: \"\"\"Return random tree found",
"< maximum: yield seen1.union(seen2), [tree] + trees sorted_rules_dict = { start: tuple(sorted(ends)) for",
"None and maximum <= 0: return if root_label in seen: yield seen, Node(root_label)",
"new_rules_dict[k].add(rule) create_tree(k, rule) rdict[k].remove(rule) if not rule_set: del rdict[k] if not changed: break",
"labels(self) -> Set[int]: \"\"\"Return the set of all labels in the proof tree.\"\"\"",
"seconds searching. \"\"\" start_time = time.time() smallest_so_far = random_proof_tree(rules_dict, root=root) smallest_size = len(smallest_so_far)",
"= None) -> RulesDict: \"\"\"Prune all nodes not iteratively verifiable.\"\"\" verified_labels: Set[int] =",
"= choice(list(rule_set)) visited, trees = all_proof_trees_dfs(rules_dict, rule, seen) root_node.children = trees return visited,",
"in _dfs_forest(rule, seen, maximum): root_node = Node(root_label) root_node.children = children yield new_seen, root_node",
"contain at least one specification for a small specification. Spends minimization_time_limit seconds searching.",
"sorted_rules_dict = { start: tuple(sorted(ends)) for start, ends in rules_dict.items() } if root",
"seen: Optional[Set[int]] = None ) -> Tuple[Set[int], List[Node]]: \"\"\"Return all labels which have",
"start: tuple(sorted(ends)) for start, ends in rules_dict.items() } if root in sorted_rules_dict: yield",
"= (\"prune\", \"proof_tree_generator_dfs\", \"proof_tree_generator_bfs\") RulesDict = Dict[int, Set[Tuple[int, ...]]] class Node: \"\"\"A node",
"proof tree.\"\"\" return 1 + sum(len(c) for c in self.children) def prune(rdict: RulesDict)",
"new_max): length = len(tree) new_maximum = maximum - length if maximum is not",
"-> Iterator[\"Node\"]: \"\"\"Yield all nodes in the proof tree.\"\"\" yield self for node",
"= [get_tree(i) for i in end] root.children = children trees[start] = root verified_labels",
"if not roots: return seen, [] root, roots = roots[0], roots[1:] seen1, tree",
"create_tree(start, end): if start in trees: return root = Node(start) children = [get_tree(i)",
"breadth first search.\"\"\" seen: Set[int] = set() root_node = Node(root) queue = deque([root_node])",
") -> Iterator[Tuple[FrozenSet[int], List[Node]]]: if maximum is not None and maximum <= 0:",
"from itertools import chain, product from random import choice, shuffle from typing import",
"Finds and returns a combinatorial specification, that we call a proof tree. \"\"\"",
"def prune(rdict: RulesDict) -> None: \"\"\" Prune all nodes not in a combinatorial",
"deque from copy import deepcopy from itertools import chain, product from random import",
"None and actual_length < maximum: yield seen1.union(seen2), [tree] + trees sorted_rules_dict = {",
"deepcopy(rules_dict) new_rules_dict: RulesDict = defaultdict(set) while True: changed = False for k, rule_set",
"trees = all_proof_trees_dfs(rules_dict, rule, seen) root_node.children = trees return visited, root_node def all_proof_trees_dfs(",
"rules_dict[root] root_node = Node(root) if root in seen or () in rule_set: seen.add(root)",
"for child in children if not child.label == root]) v.children = children return",
"for k, rule_set in list(rdict.items()): for rule in list(rule_set): if all(x in verified_labels",
"is None: children = [] self.label = n self.children = children def labels(self)",
"Searches a rule_dict known to contain at least one specification for a small",
"smallest_size = len(smallest_so_far) while time.time() - start_time < minimization_time_limit: next_tree = random_proof_tree(rules_dict, root=root)",
"assumed to be pruned. \"\"\" def _bfs_helper(root_label: int, seen: FrozenSet[int]): if root_label in",
"seen.union((root_label,)) for rule in rules_dict[root_label]: for children in product( *[_bfs_helper(child_label, next_seen) for child_label",
"FrozenSet, Iterator, List, Optional, Sequence, Set, Tuple __all__ = (\"prune\", \"proof_tree_generator_dfs\", \"proof_tree_generator_bfs\") RulesDict",
"int, seen: Optional[Set[int]] = None): \"\"\"Return random proof tree found by depth first",
"depth first search. N.B. The rules_dict is assumed to be pruned. \"\"\" def",
"-> Node: \"\"\"Return random tree found by breadth first search.\"\"\" seen: Set[int] =",
"length = len(tree) new_maximum = maximum - length if maximum is not None",
"verified_labels.add(k) new_rules_dict[k].add(rule) create_tree(k, rule) rdict[k].remove(rule) if not rule_set: del rdict[k] if not changed:",
"RulesDict = defaultdict(set) while True: changed = False for k, rule_set in list(rdict.items()):",
"Spends minimization_time_limit seconds searching. \"\"\" start_time = time.time() smallest_so_far = random_proof_tree(rules_dict, root=root) smallest_size",
"= children yield root_node sorted_rules_dict = { start: tuple(sorted(ends)) for start, ends in",
"specification, that we call a proof tree. \"\"\" import time from collections import",
"i in end] root.children = children trees[start] = root verified_labels = set() if",
"in verified_labels for x in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) create_tree(k, rule)",
"root in seen or () in rule_set: seen.add(root) return seen, root_node seen.add(root) rule",
"yield seen, [] else: root, roots = root_labels[0], root_labels[1:] new_max = maximum -",
"trees[start] = root verified_labels = set() if root is not None: verified_labels.add(root) rdict",
"*map(str, self.children), \")\"]) def __len__(self) -> int: \"\"\"Return the number nodes in the",
"rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) rdict[k].remove(rule) if not rule_set: del rdict[k] if",
"Tuple __all__ = (\"prune\", \"proof_tree_generator_dfs\", \"proof_tree_generator_bfs\") RulesDict = Dict[int, Set[Tuple[int, ...]]] class Node:",
"None for seen1, tree in _dfs_tree(root, seen, new_max): length = len(tree) new_maximum =",
"for rule in sorted_rules_dict[root_label]: if rule == (): yield seen, Node(root_label) else: for",
"Optional[List[\"Node\"]] = None): if children is None: children = [] self.label = n",
"float ) -> Node: \"\"\" Searches a rule_dict known to contain at least",
"in rdict for x in rule): rule_set.remove(rule) changed = True if not rule_set:",
"Set[int] = set() if root is not None: verified_labels.add(root) rdict = deepcopy(rules_dict) new_rules_dict:",
"\"\"\"Return random proof tree found by depth first search.\"\"\" if seen is None:",
"roots: Sequence[int], seen: Optional[Set[int]] = None ) -> Tuple[Set[int], List[Node]]: \"\"\"Return all labels",
"tree found by depth first search.\"\"\" if seen is None: seen = set()",
"proof tree.\"\"\" root_node = Node(root) queue = deque([root_node]) while queue: v = queue.popleft()",
"while changed: changed = False for k, rule_set in list(rdict.items()): for rule in",
"trees using the given roots..\"\"\" if seen is None: seen = set() if",
"not None else None for seen1, tree in _dfs_tree(root, seen, new_max): length =",
"trees\".format(start)) def create_tree(start, end): if start in trees: return root = Node(start) children",
"int) -> Node: \"\"\"Takes in a iterative pruned rules_dict and returns iterative proof",
"KeyError(\"{} is not in trees\".format(start)) def create_tree(start, end): if start in trees: return",
"new_rules_dict: RulesDict = defaultdict(set) while True: changed = False for k, rule_set in",
"tree.\"\"\" return 1 + sum(len(c) for c in self.children) def prune(rdict: RulesDict) ->",
"None: children = [] self.label = n self.children = children def labels(self) ->",
"trees = all_proof_trees_dfs(rules_dict, roots, seen1) return seen1.union(seen2), [tree] + trees def iterative_proof_tree_bfs(rules_dict: RulesDict,",
"from random import choice, shuffle from typing import Dict, FrozenSet, Iterator, List, Optional,",
"all_proof_trees_dfs(rules_dict, roots, seen1) return seen1.union(seen2), [tree] + trees def iterative_proof_tree_bfs(rules_dict: RulesDict, root: int)",
"for k, rule_set in list(rdict.items()): for rule in list(rule_set): if any(x not in",
"-> Iterator[Node]: \"\"\"A generator for all proof trees using breadth first search. N.B.",
"Optional[int] = None ) -> Iterator[Tuple[FrozenSet[int], List[Node]]]: if maximum is not None and",
"seen, together with all of the trees using the given roots..\"\"\" if seen",
"yield from _bfs_helper(root, frozenset()) def proof_tree_generator_dfs( rules_dict: RulesDict, root: int, maximum: Optional[int] =",
"is not None else None for seen1, tree in _dfs_tree(root, seen, new_max): length",
"length + sum(len(t) for t in trees) if maximum is not None and",
"in list(rdict.items()): for rule in list(rule_set): if all(x in verified_labels for x in",
"a proof tree.\"\"\" def __init__(self, n: int, children: Optional[List[\"Node\"]] = None): if children",
"the proof tree.\"\"\" yield self for node in self.children: yield from node.nodes() def",
"k, rule_set in list(rdict.items()): for rule in list(rule_set): if any(x not in rdict",
"roots = roots[0], roots[1:] seen1, tree = proof_tree_dfs(rules_dict, root, seen) seen2, trees =",
"is not None and actual_length < maximum: yield seen1.union(seen2), [tree] + trees sorted_rules_dict",
"<reponame>odinn13/comb_spec_searcher-1 \"\"\" Finds and returns a combinatorial specification, that we call a proof",
"defaultdict, deque from copy import deepcopy from itertools import chain, product from random",
"\"\"\"Return the set of all labels in the proof tree.\"\"\" res = set([self.label])",
"def __str__(self) -> str: return \"\".join([\"(\", str(self.label), *map(str, self.children), \")\"]) def __len__(self) ->",
"<= 0: return if root_label in seen: yield seen, Node(root_label) return seen =",
"for all proof trees using depth first search. N.B. The rules_dict is assumed",
"list(rdict.items()): for rule in list(rule_set): if any(x not in rdict for x in",
"return 1 + sum(len(c) for c in self.children) def prune(rdict: RulesDict) -> None:",
"res = set([self.label]) res.update(chain.from_iterable(node.labels() for node in self.children)) return res def nodes(self) ->",
"if start in trees: return trees[start] raise KeyError(\"{} is not in trees\".format(start)) def",
"in seen or () in rule_set: seen.add(root) return seen, root_node seen.add(root) rule =",
"shuffle from typing import Dict, FrozenSet, Iterator, List, Optional, Sequence, Set, Tuple __all__",
"in self.children: yield from node.nodes() def __str__(self) -> str: return \"\".join([\"(\", str(self.label), *map(str,",
"= all_proof_trees_dfs(rules_dict, rule, seen) root_node.children = trees return visited, root_node def all_proof_trees_dfs( rules_dict:",
"if next_tree_size < smallest_size: smallest_so_far = next_tree smallest_size = next_tree_size return smallest_so_far def",
"root_node def _dfs_forest( root_labels: Sequence[int], seen: FrozenSet[int], maximum: Optional[int] = None ) ->",
"if one exists. \"\"\" trees: Dict[int, Node] = {} def get_tree(start): if start",
"break return new_rules_dict def proof_tree_dfs(rules_dict: RulesDict, root: int, seen: Optional[Set[int]] = None): \"\"\"Return",
"and maximum <= 0: return if not root_labels: yield seen, [] else: root,",
"= None ) -> Iterator[Tuple[FrozenSet[int], List[Node]]]: if maximum is not None and maximum",
"next_tree_size return smallest_so_far def proof_tree_generator_bfs(rules_dict: RulesDict, root: int) -> Iterator[Node]: \"\"\"A generator for",
"Node: \"\"\"A node for a proof tree.\"\"\" def __init__(self, n: int, children: Optional[List[\"Node\"]]",
"root=root) smallest_size = len(smallest_so_far) while time.time() - start_time < minimization_time_limit: next_tree = random_proof_tree(rules_dict,",
"for all proof trees using breadth first search. N.B. The rules_dict is assumed",
"seen: FrozenSet[int], maximum: int = None ) -> Iterator[Tuple[FrozenSet[int], Node]]: if maximum is",
"of all labels in the proof tree.\"\"\" res = set([self.label]) res.update(chain.from_iterable(node.labels() for node",
"tree = proof_tree_dfs(rules_dict, root, seen) seen2, trees = all_proof_trees_dfs(rules_dict, roots, seen1) return seen1.union(seen2),",
") -> Node: \"\"\" Searches a rule_dict known to contain at least one",
"True verified_labels.add(k) new_rules_dict[k].add(rule) create_tree(k, rule) rdict[k].remove(rule) if not rule_set: del rdict[k] if not",
"root in rules_dict: rule_set = rules_dict[root] root_node = Node(root) if root in seen",
"root: int, maximum: Optional[int] = None ) -> Iterator[Node]: \"\"\"A generator for all",
"\"\"\" changed = True while changed: changed = False for k, rule_set in",
"Iterator[Tuple[FrozenSet[int], List[Node]]]: if maximum is not None and maximum <= 0: return if",
"ends in rules_dict.items() } if root in sorted_rules_dict: for _, tree in _dfs_tree(root,",
"if not changed: break return new_rules_dict def proof_tree_dfs(rules_dict: RulesDict, root: int, seen: Optional[Set[int]]",
"Optional[int] = None ) -> Iterator[Node]: \"\"\"A generator for all proof trees using",
"collections import defaultdict, deque from copy import deepcopy from itertools import chain, product",
"not child.label == root]) v.children = children return root_node def random_proof_tree(rules_dict: RulesDict, root:",
"visited, root_node def all_proof_trees_dfs( rules_dict: RulesDict, roots: Sequence[int], seen: Optional[Set[int]] = None )",
"else None for seen1, tree in _dfs_tree(root, seen, new_max): length = len(tree) new_maximum",
"0: return if root_label in seen: yield seen, Node(root_label) return seen = seen.union((root_label,))",
"actual_length = length + sum(len(t) for t in trees) if maximum is not",
"self.children) def prune(rdict: RulesDict) -> None: \"\"\" Prune all nodes not in a",
"first search.\"\"\" if seen is None: seen = set() seen = seen.copy() if",
"new_seen, children in _dfs_forest(rule, seen, maximum): root_node = Node(root_label) root_node.children = children yield",
"__str__(self) -> str: return \"\".join([\"(\", str(self.label), *map(str, self.children), \")\"]) def __len__(self) -> int:",
"return if root_label in seen: yield seen, Node(root_label) return seen = seen.union((root_label,)) for",
"import defaultdict, deque from copy import deepcopy from itertools import chain, product from",
"new_maximum): actual_length = length + sum(len(t) for t in trees) if maximum is",
"for _, tree in _dfs_tree(root, frozenset(), maximum): yield tree def iterative_proof_tree_finder(rules_dict: RulesDict, root:",
"c in self.children) def prune(rdict: RulesDict) -> None: \"\"\" Prune all nodes not",
"and returns iterative proof tree.\"\"\" root_node = Node(root) queue = deque([root_node]) while queue:",
"tree for root, if one exists. \"\"\" trees: Dict[int, Node] = {} def",
"Node(root) queue = deque([root_node]) while queue: v = queue.popleft() rule = sorted(rules_dict[v.label])[0] if",
"a proof tree. \"\"\" import time from collections import defaultdict, deque from copy",
"List[Node]]: \"\"\"Return all labels which have been seen, together with all of the",
"= time.time() smallest_so_far = random_proof_tree(rules_dict, root=root) smallest_size = len(smallest_so_far) while time.time() - start_time",
"next_tree_size = len(next_tree) if next_tree_size < smallest_size: smallest_so_far = next_tree smallest_size = next_tree_size",
"\"proof_tree_generator_bfs\") RulesDict = Dict[int, Set[Tuple[int, ...]]] class Node: \"\"\"A node for a proof",
"= None ) -> Tuple[Set[int], List[Node]]: \"\"\"Return all labels which have been seen,",
"children def labels(self) -> Set[int]: \"\"\"Return the set of all labels in the",
"in rules_dict.items() } if root in sorted_rules_dict: yield from _bfs_helper(root, frozenset()) def proof_tree_generator_dfs(",
"== ()): children = [Node(i) for i in rule] shuffle(children) queue.extend(children) v.children =",
"\"\"\" Searches a rule_dict known to contain at least one specification for a",
"not in a combinatorial specification. This changes rdict in place. \"\"\" changed =",
"end] root.children = children trees[start] = root verified_labels = set() if root is",
"= set() root_node = Node(root) queue = deque([root_node]) while queue: v = queue.popleft()",
"at least one specification for a small specification. Spends minimization_time_limit seconds searching. \"\"\"",
"= len(smallest_so_far) while time.time() - start_time < minimization_time_limit: next_tree = random_proof_tree(rules_dict, root=root) next_tree_size",
"list(rule_set): if all(x in verified_labels for x in rule): changed = True verified_labels.add(k)",
"return seen, [] root, roots = roots[0], roots[1:] seen1, tree = proof_tree_dfs(rules_dict, root,",
"from node.nodes() def __str__(self) -> str: return \"\".join([\"(\", str(self.label), *map(str, self.children), \")\"]) def",
"queue.popleft() rule = sorted(rules_dict[v.label])[0] if not rule == (): children = [Node(i) for",
"return seen = seen.union((root_label,)) for rule in sorted_rules_dict[root_label]: if rule == (): yield",
"Node(root_label) return next_seen = seen.union((root_label,)) for rule in rules_dict[root_label]: for children in product(",
"root.children = children trees[start] = root verified_labels = set() if root is not",
"not iteratively verifiable.\"\"\" verified_labels: Set[int] = set() if root is not None: verified_labels.add(root)",
"all_proof_trees_dfs(rules_dict, rule, seen) root_node.children = trees return visited, root_node def all_proof_trees_dfs( rules_dict: RulesDict,",
"root: int, seen: Optional[Set[int]] = None): \"\"\"Return random proof tree found by depth",
"sorted(rules_dict[v.label])[0] if not rule == (): children = [Node(i) for i in rule]",
"...]]] class Node: \"\"\"A node for a proof tree.\"\"\" def __init__(self, n: int,",
"root_label in seen: yield seen, Node(root_label) return seen = seen.union((root_label,)) for rule in",
"def proof_tree_dfs(rules_dict: RulesDict, root: int, seen: Optional[Set[int]] = None): \"\"\"Return random proof tree",
"set() if not roots: return seen, [] root, roots = roots[0], roots[1:] seen1,",
"not None and actual_length < maximum: yield seen1.union(seen2), [tree] + trees sorted_rules_dict =",
"root_node def random_proof_tree(rules_dict: RulesDict, root: int) -> Node: \"\"\"Return random tree found by",
"= trees return visited, root_node def all_proof_trees_dfs( rules_dict: RulesDict, roots: Sequence[int], seen: Optional[Set[int]]",
"ends in rules_dict.items() } if root in sorted_rules_dict: yield from _bfs_helper(root, frozenset()) def",
"= True while changed: changed = False for k, rule_set in list(rdict.items()): for",
"found by breadth first search.\"\"\" seen: Set[int] = set() root_node = Node(root) queue",
"seen1, new_maximum): actual_length = length + sum(len(t) for t in trees) if maximum",
"not rule_set: del rdict[k] if not changed: break return new_rules_dict def proof_tree_dfs(rules_dict: RulesDict,",
"-> RulesDict: \"\"\"Prune all nodes not iteratively verifiable.\"\"\" verified_labels: Set[int] = set() if",
"changed = True verified_labels.add(k) new_rules_dict[k].add(rule) create_tree(k, rule) rdict[k].remove(rule) if not rule_set: del rdict[k]",
"def proof_tree_generator_dfs( rules_dict: RulesDict, root: int, maximum: Optional[int] = None ) -> Iterator[Node]:",
"None) -> RulesDict: \"\"\"Prune all nodes not iteratively verifiable.\"\"\" verified_labels: Set[int] = set()",
"x in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) rdict[k].remove(rule) if not rule_set: del",
"in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) rdict[k].remove(rule) if not rule_set: del rdict[k]",
"search.\"\"\" if seen is None: seen = set() seen = seen.copy() if root",
"self.children)) return res def nodes(self) -> Iterator[\"Node\"]: \"\"\"Yield all nodes in the proof",
"rule] queue.extend([child for child in children if not child.label == root]) v.children =",
"that we call a proof tree. \"\"\" import time from collections import defaultdict,",
"frozenset()) def proof_tree_generator_dfs( rules_dict: RulesDict, root: int, maximum: Optional[int] = None ) ->",
"all proof trees using depth first search. N.B. The rules_dict is assumed to",
"is not in trees\".format(start)) def create_tree(start, end): if start in trees: return root",
"n self.children = children def labels(self) -> Set[int]: \"\"\"Return the set of all",
"v.children = children seen.add(v.label) return root_node def smallish_random_proof_tree( rules_dict: RulesDict, root: int, minimization_time_limit:",
"root: int) -> Iterator[Node]: \"\"\"A generator for all proof trees using breadth first",
"return res def nodes(self) -> Iterator[\"Node\"]: \"\"\"Yield all nodes in the proof tree.\"\"\"",
"= deque([root_node]) while queue: v = queue.popleft() rule = sorted(rules_dict[v.label])[0] if not rule",
"Iterator[Tuple[FrozenSet[int], Node]]: if maximum is not None and maximum <= 0: return if",
"root = Node(start) children = [get_tree(i) for i in end] root.children = children",
"pruned. \"\"\" def _dfs_tree( root_label: int, seen: FrozenSet[int], maximum: int = None )",
"if maximum is not None else None for seen2, trees in _dfs_forest(roots, seen1,",
"in rule] ): root_node = Node(root_label) root_node.children = children yield root_node sorted_rules_dict =",
"seen, Node(root_label) return seen = seen.union((root_label,)) for rule in sorted_rules_dict[root_label]: if rule ==",
"in list(rule_set): if any(x not in rdict for x in rule): rule_set.remove(rule) changed",
"import choice, shuffle from typing import Dict, FrozenSet, Iterator, List, Optional, Sequence, Set,",
"is not None: verified_labels.add(root) rdict = deepcopy(rules_dict) new_rules_dict: RulesDict = defaultdict(set) while True:",
"pruned rules_dict and returns iterative proof tree.\"\"\" root_node = Node(root) queue = deque([root_node])",
"random tree found by breadth first search.\"\"\" seen: Set[int] = set() root_node =",
"seen = seen.union((root_label,)) for rule in sorted_rules_dict[root_label]: if rule == (): yield seen,",
"List[Node]]]: if maximum is not None and maximum <= 0: return if not",
"nodes in the proof tree.\"\"\" yield self for node in self.children: yield from",
"List, Optional, Sequence, Set, Tuple __all__ = (\"prune\", \"proof_tree_generator_dfs\", \"proof_tree_generator_bfs\") RulesDict = Dict[int,",
") -> Tuple[Set[int], List[Node]]: \"\"\"Return all labels which have been seen, together with",
"root_node seen.add(root) rule = choice(list(rule_set)) visited, trees = all_proof_trees_dfs(rules_dict, rule, seen) root_node.children =",
"start_time = time.time() smallest_so_far = random_proof_tree(rules_dict, root=root) smallest_size = len(smallest_so_far) while time.time() -",
"+ trees def iterative_proof_tree_bfs(rules_dict: RulesDict, root: int) -> Node: \"\"\"Takes in a iterative",
"a rule_dict known to contain at least one specification for a small specification.",
"sorted_rules_dict: yield from _bfs_helper(root, frozenset()) def proof_tree_generator_dfs( rules_dict: RulesDict, root: int, maximum: Optional[int]",
"list(rule_set): if any(x not in rdict for x in rule): rule_set.remove(rule) changed =",
"[Node(i) for i in rule] queue.extend([child for child in children if not child.label",
"proof_tree_generator_bfs(rules_dict: RulesDict, root: int) -> Iterator[Node]: \"\"\"A generator for all proof trees using",
"not roots: return seen, [] root, roots = roots[0], roots[1:] seen1, tree =",
"Dict, FrozenSet, Iterator, List, Optional, Sequence, Set, Tuple __all__ = (\"prune\", \"proof_tree_generator_dfs\", \"proof_tree_generator_bfs\")",
"{ start: tuple(sorted(ends)) for start, ends in rules_dict.items() } if root in sorted_rules_dict:",
"): root_node = Node(root_label) root_node.children = children yield root_node sorted_rules_dict = { start:",
"sum(len(t) for t in trees) if maximum is not None and actual_length <",
"assumed to be pruned. \"\"\" def _dfs_tree( root_label: int, seen: FrozenSet[int], maximum: int",
"Optional[int] = None) -> RulesDict: \"\"\"Prune all nodes not iteratively verifiable.\"\"\" verified_labels: Set[int]",
"been seen, together with all of the trees using the given roots..\"\"\" if",
"for a proof tree.\"\"\" def __init__(self, n: int, children: Optional[List[\"Node\"]] = None): if",
"in the proof tree.\"\"\" res = set([self.label]) res.update(chain.from_iterable(node.labels() for node in self.children)) return",
"== (): children = [Node(i) for i in rule] queue.extend([child for child in",
"seen, [] root, roots = roots[0], roots[1:] seen1, tree = proof_tree_dfs(rules_dict, root, seen)",
"root verified_labels = set() if root is not None: verified_labels.add(root) rdict = deepcopy(rules_dict)",
"v = queue.popleft() rule = choice(list(rules_dict[v.label])) if not (v.label in seen or rule",
"root_node = Node(root_label) root_node.children = children yield new_seen, root_node def _dfs_forest( root_labels: Sequence[int],",
"yield tree def iterative_proof_tree_finder(rules_dict: RulesDict, root: int) -> Node: \"\"\"Finds an iterative proof",
"Node(root_label) root_node.children = children yield root_node sorted_rules_dict = { start: tuple(sorted(ends)) for start,",
"all proof trees using breadth first search. N.B. The rules_dict is assumed to",
"root_label in seen: yield Node(root_label) return next_seen = seen.union((root_label,)) for rule in rules_dict[root_label]:",
"Sequence, Set, Tuple __all__ = (\"prune\", \"proof_tree_generator_dfs\", \"proof_tree_generator_bfs\") RulesDict = Dict[int, Set[Tuple[int, ...]]]",
"[Node(i) for i in rule] shuffle(children) queue.extend(children) v.children = children seen.add(v.label) return root_node",
"0: return if not root_labels: yield seen, [] else: root, roots = root_labels[0],",
"children = [] self.label = n self.children = children def labels(self) -> Set[int]:",
"changed: changed = False for k, rule_set in list(rdict.items()): for rule in list(rule_set):",
"return root = Node(start) children = [get_tree(i) for i in end] root.children =",
"rdict[k].remove(rule) if not rule_set: del rdict[k] if not changed: break if root in",
"= children return root_node def random_proof_tree(rules_dict: RulesDict, root: int) -> Node: \"\"\"Return random",
"= True if not rule_set: del rdict[k] def iterative_prune(rules_dict: RulesDict, root: Optional[int] =",
"and maximum <= 0: return if root_label in seen: yield seen, Node(root_label) return",
"else: for new_seen, children in _dfs_forest(rule, seen, maximum): root_node = Node(root_label) root_node.children =",
"root_labels: Sequence[int], seen: FrozenSet[int], maximum: Optional[int] = None ) -> Iterator[Tuple[FrozenSet[int], List[Node]]]: if",
"def iterative_proof_tree_bfs(rules_dict: RulesDict, root: int) -> Node: \"\"\"Takes in a iterative pruned rules_dict",
"set() seen = seen.copy() if root in rules_dict: rule_set = rules_dict[root] root_node =",
"maximum: Optional[int] = None ) -> Iterator[Node]: \"\"\"A generator for all proof trees",
"\"\"\"Finds an iterative proof tree for root, if one exists. \"\"\" trees: Dict[int,",
"return seen1.union(seen2), [tree] + trees def iterative_proof_tree_bfs(rules_dict: RulesDict, root: int) -> Node: \"\"\"Takes",
"= maximum - len(root_labels) + 1 if maximum is not None else None",
"in children if not child.label == root]) v.children = children return root_node def",
"the number nodes in the proof tree.\"\"\" return 1 + sum(len(c) for c",
"trees[start] raise KeyError(\"{} is not in trees\".format(start)) def create_tree(start, end): if start in",
"verified_labels for x in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) rdict[k].remove(rule) if not",
"return new_rules_dict def proof_tree_dfs(rules_dict: RulesDict, root: int, seen: Optional[Set[int]] = None): \"\"\"Return random",
"smallest_size: smallest_so_far = next_tree smallest_size = next_tree_size return smallest_so_far def proof_tree_generator_bfs(rules_dict: RulesDict, root:",
"iteratively verifiable.\"\"\" verified_labels: Set[int] = set() if root is not None: verified_labels.add(root) rdict",
"for children in product( *[_bfs_helper(child_label, next_seen) for child_label in rule] ): root_node =",
"Node]]: if maximum is not None and maximum <= 0: return if root_label",
"rule): rule_set.remove(rule) changed = True if not rule_set: del rdict[k] def iterative_prune(rules_dict: RulesDict,",
"if root is not None: verified_labels.add(root) rdict = deepcopy(rules_dict) new_rules_dict: RulesDict = defaultdict(set)",
"from typing import Dict, FrozenSet, Iterator, List, Optional, Sequence, Set, Tuple __all__ =",
"start in trees: return root = Node(start) children = [get_tree(i) for i in",
"= Node(root) queue = deque([root_node]) while queue: v = queue.popleft() rule = sorted(rules_dict[v.label])[0]",
"int, maximum: Optional[int] = None ) -> Iterator[Node]: \"\"\"A generator for all proof",
"seen) seen2, trees = all_proof_trees_dfs(rules_dict, roots, seen1) return seen1.union(seen2), [tree] + trees def",
"if root in sorted_rules_dict: for _, tree in _dfs_tree(root, frozenset(), maximum): yield tree",
"-> str: return \"\".join([\"(\", str(self.label), *map(str, self.children), \")\"]) def __len__(self) -> int: \"\"\"Return",
"queue.extend([child for child in children if not child.label == root]) v.children = children",
"new_rules_dict[k].add(rule) rdict[k].remove(rule) if not rule_set: del rdict[k] if not changed: break return new_rules_dict",
"while queue: v = queue.popleft() rule = choice(list(rules_dict[v.label])) if not (v.label in seen",
"rule == ()): children = [Node(i) for i in rule] shuffle(children) queue.extend(children) v.children",
"product( *[_bfs_helper(child_label, next_seen) for child_label in rule] ): root_node = Node(root_label) root_node.children =",
"seen is None: seen = set() seen = seen.copy() if root in rules_dict:",
"seen.add(root) rule = choice(list(rule_set)) visited, trees = all_proof_trees_dfs(rules_dict, rule, seen) root_node.children = trees",
"= [Node(i) for i in rule] shuffle(children) queue.extend(children) v.children = children seen.add(v.label) return",
"n: int, children: Optional[List[\"Node\"]] = None): if children is None: children = []",
"seen.add(v.label) return root_node def smallish_random_proof_tree( rules_dict: RulesDict, root: int, minimization_time_limit: float ) ->",
"trees def iterative_proof_tree_bfs(rules_dict: RulesDict, root: int) -> Node: \"\"\"Takes in a iterative pruned",
"(): yield seen, Node(root_label) else: for new_seen, children in _dfs_forest(rule, seen, maximum): root_node",
"first search.\"\"\" seen: Set[int] = set() root_node = Node(root) queue = deque([root_node]) while",
"x in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) create_tree(k, rule) rdict[k].remove(rule) if not",
"= Node(root) if root in seen or () in rule_set: seen.add(root) return seen,",
"rule_set in list(rdict.items()): for rule in list(rule_set): if all(x in verified_labels for x",
"yield new_seen, root_node def _dfs_forest( root_labels: Sequence[int], seen: FrozenSet[int], maximum: Optional[int] = None",
"if maximum is not None else None for seen1, tree in _dfs_tree(root, seen,",
"root: return Node(start) if start in trees: return trees[start] raise KeyError(\"{} is not",
"for new_seen, children in _dfs_forest(rule, seen, maximum): root_node = Node(root_label) root_node.children = children",
"specification. This changes rdict in place. \"\"\" changed = True while changed: changed",
"all(x in verified_labels for x in rule): changed = True verified_labels.add(k) new_rules_dict[k].add(rule) rdict[k].remove(rule)",
"not changed: break return new_rules_dict def proof_tree_dfs(rules_dict: RulesDict, root: int, seen: Optional[Set[int]] =",
"str: return \"\".join([\"(\", str(self.label), *map(str, self.children), \")\"]) def __len__(self) -> int: \"\"\"Return the",
"number nodes in the proof tree.\"\"\" return 1 + sum(len(c) for c in",
"rules_dict: RulesDict, roots: Sequence[int], seen: Optional[Set[int]] = None ) -> Tuple[Set[int], List[Node]]: \"\"\"Return",
"\"\"\"A generator for all proof trees using depth first search. N.B. The rules_dict",
"maximum is not None and actual_length < maximum: yield seen1.union(seen2), [tree] + trees",
"list(rdict.items()): for rule in list(rule_set): if all(x in verified_labels for x in rule):",
"proof tree.\"\"\" def __init__(self, n: int, children: Optional[List[\"Node\"]] = None): if children is",
"together with all of the trees using the given roots..\"\"\" if seen is",
"rules_dict[root_label]: for children in product( *[_bfs_helper(child_label, next_seen) for child_label in rule] ): root_node",
"seen.union((root_label,)) for rule in sorted_rules_dict[root_label]: if rule == (): yield seen, Node(root_label) else:",
"if not rule_set: del rdict[k] if not changed: break if root in trees:",
"{} def get_tree(start): if start == root: return Node(start) if start in trees:",
"rules_dict: RulesDict, root: int, maximum: Optional[int] = None ) -> Iterator[Node]: \"\"\"A generator",
"the trees using the given roots..\"\"\" if seen is None: seen = set()",
"for c in self.children) def prune(rdict: RulesDict) -> None: \"\"\" Prune all nodes",
"Iterator[Node]: \"\"\"A generator for all proof trees using depth first search. N.B. The",
"\"\"\" Finds and returns a combinatorial specification, that we call a proof tree.",
"we call a proof tree. \"\"\" import time from collections import defaultdict, deque",
"root_node.children = children yield new_seen, root_node def _dfs_forest( root_labels: Sequence[int], seen: FrozenSet[int], maximum:",
"for rule in list(rule_set): if all(x in verified_labels for x in rule): changed",
"is assumed to be pruned. \"\"\" def _dfs_tree( root_label: int, seen: FrozenSet[int], maximum:",
"verifiable.\"\"\" verified_labels: Set[int] = set() if root is not None: verified_labels.add(root) rdict =",
"next_seen = seen.union((root_label,)) for rule in rules_dict[root_label]: for children in product( *[_bfs_helper(child_label, next_seen)"
] |
[
"the Open Babel chemistry library', long_description=long_description, zip_safe=False, cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt, 'install': CustomInstall,",
"found') def locate_ob(): \"\"\"Try use pkgconfig to locate Open Babel, otherwise guess default",
"many languages of chemical data. It's an open, collaborative project allowing anyone to",
"% package) return stdout.strip() except OSError: raise PkgConfigError('pkg-config could not be found') def",
"try: version = pkgconfig('openbabel-2.0', '--modversion') if not StrictVersion(version) >= StrictVersion('2.3.0'): print('Warning: Open Babel",
"%s.\\nGuessing Open Babel location:' % e) include_dirs = '/usr/local/include/openbabel-2.0' library_dirs = '/usr/local/lib' return",
"distutils.command.sdist import sdist from distutils.errors import DistutilsExecError from distutils.version import StrictVersion from setuptools.command.build_ext",
"\"\"\"Custom build_ext to set SWIG options and print a better error message.\"\"\" def",
"sys from distutils.command.build import build from distutils.command.sdist import sdist from distutils.errors import DistutilsExecError",
"= pkgconfig('openbabel-2.0', '--modversion') if not StrictVersion(version) >= StrictVersion('2.3.0'): print('Warning: Open Babel 2.3.0 or",
":: Windows', 'Operating System :: OS Independent', 'Operating System :: POSIX', 'Operating System",
"'sdist': CustomSdist}, py_modules=['openbabel', 'pybel'], ext_modules=[obextension], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment",
"sdist.make_release_tree(self, base_dir, files) link = 'hard' if hasattr(os, 'link') else None self.copy_file('../stereo.i', base_dir,",
"print a better error message.\"\"\" def finalize_options(self): # Setting include_dirs, library_dirs, swig_opts here",
"self.run_command('build_ext') install.run(self) class CustomSdist(sdist): \"\"\"Add swig interface files into distribution from parent directory.\"\"\"",
"languages of chemical data. It's an open, collaborative project allowing anyone to search,",
"install') sys.exit(1) obextension = Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel']) setup(name='openbabel', version=__version__, author=__author__, author_email=__email__, license=__license__, url='http://openbabel.org/',",
"url='http://openbabel.org/', description='Python interface to the Open Babel chemistry library', long_description=long_description, zip_safe=False, cmdclass={'build': CustomBuild,",
"'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows',",
"convert, analyze, or store data from molecular modeling, chemistry, solid-state materials, biochemistry, or",
"sources, extension): try: return build_ext.swig_sources(self, sources, extension) except DistutilsExecError: print('\\nError: SWIG failed. Is",
"-L/usr/local/lib\\n' ' python setup.py install') sys.exit(1) obextension = Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel']) setup(name='openbabel', version=__version__,",
"from setuptools.command.install import install from setuptools import setup, Extension __author__ = '<NAME>' __email__",
"sources, extension) except DistutilsExecError: print('\\nError: SWIG failed. Is Open Babel installed?\\n' 'You may",
":: Bio-Informatics', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic :: Software Development :: Libraries'",
"not be compatible.' % version) include_dirs = pkgconfig('openbabel-2.0', '--variable=pkgincludedir') library_dirs = pkgconfig('openbabel-2.0', '--variable=libdir')",
"chemistry library', long_description=long_description, zip_safe=False, cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt, 'install': CustomInstall, 'sdist': CustomSdist}, py_modules=['openbabel',",
"interface to the Open Babel chemistry library', long_description=long_description, zip_safe=False, cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt,",
"better error message.\"\"\" def finalize_options(self): # Setting include_dirs, library_dirs, swig_opts here instead of",
"related areas. It provides a broad base of chemical functionality for custom development.",
"['-c++', '-small', '-O', '-templatereduce', '-naturalvar'] self.swig_opts += ['-I%s' % i for i in",
"'Operating System :: POSIX', 'Operating System :: POSIX :: Linux', 'Operating System ::",
"first in build command.\"\"\" def run(self): self.run_command('build_ext') build.run(self) class CustomInstall(install): \"\"\"Ensure build_ext runs",
"POSIX', 'Operating System :: POSIX :: Linux', 'Operating System :: Unix', 'Programming Language",
":: Scientific/Engineering :: Bio-Informatics', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic :: Software Development",
"General Public License (GPL)', 'Natural Language :: English', 'Operating System :: MacOS ::",
"Python', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic ::",
"-I and -L command line options to python setup.py build_ext. build_ext.finalize_options(self) include_dirs, library_dirs",
"license=__license__, url='http://openbabel.org/', description='Python interface to the Open Babel chemistry library', long_description=long_description, zip_safe=False, cmdclass={'build':",
"import os import subprocess import sys from distutils.command.build import build from distutils.command.sdist import",
"import sdist from distutils.errors import DistutilsExecError from distutils.version import StrictVersion from setuptools.command.build_ext import",
"setup.py install') sys.exit(1) obextension = Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel']) setup(name='openbabel', version=__version__, author=__author__, author_email=__email__, license=__license__,",
"run(self): self.run_command('build_ext') build.run(self) class CustomInstall(install): \"\"\"Ensure build_ext runs first in install command.\"\"\" def",
"libraries=['openbabel']) setup(name='openbabel', version=__version__, author=__author__, author_email=__email__, license=__license__, url='http://openbabel.org/', description='Python interface to the Open Babel",
"from parent directory.\"\"\" def make_release_tree(self, base_dir, files): sdist.make_release_tree(self, base_dir, files) link = 'hard'",
"System :: POSIX :: Linux', 'Operating System :: Unix', 'Programming Language :: C++',",
"'You may need to manually specify the location of Open Babel include and",
"'Topic :: Scientific/Engineering :: Bio-Informatics', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic :: Software",
":: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: OS",
"store data from molecular modeling, chemistry, solid-state materials, biochemistry, or related areas. It",
"hasattr(os, 'link') else None self.copy_file('../stereo.i', base_dir, link=link) self.copy_file('../openbabel-python.i', base_dir, link=link) class CustomBuildExt(build_ext): \"\"\"Custom",
"build_ext runs first in install command.\"\"\" def run(self): self.run_command('build_ext') install.run(self) class CustomSdist(sdist): \"\"\"Add",
"from distutils.command.sdist import sdist from distutils.errors import DistutilsExecError from distutils.version import StrictVersion from",
"'install': CustomInstall, 'sdist': CustomSdist}, py_modules=['openbabel', 'pybel'], ext_modules=[obextension], classifiers=[ 'Development Status :: 5 -",
"DistutilsExecError from distutils.version import StrictVersion from setuptools.command.build_ext import build_ext from setuptools.command.install import install",
"stderr: raise PkgConfigError('package %s could not be found by pkg-config' % package) return",
"CustomBuild(build): \"\"\"Ensure build_ext runs first in build command.\"\"\" def run(self): self.run_command('build_ext') build.run(self) class",
"Extension constructor allows them to be # overridden using -I and -L command",
"import StrictVersion from setuptools.command.build_ext import build_ext from setuptools.command.install import install from setuptools import",
"build_ext. build_ext.finalize_options(self) include_dirs, library_dirs = locate_ob() self.include_dirs.append(include_dirs) self.library_dirs.append(library_dirs) self.swig_opts = ['-c++', '-small', '-O',",
"build_ext runs first in build command.\"\"\" def run(self): self.run_command('build_ext') build.run(self) class CustomInstall(install): \"\"\"Ensure",
"distutils.command.build import build from distutils.command.sdist import sdist from distutils.errors import DistutilsExecError from distutils.version",
"build from distutils.command.sdist import sdist from distutils.errors import DistutilsExecError from distutils.version import StrictVersion",
"(GPL)', 'Natural Language :: English', 'Operating System :: MacOS :: MacOS X', 'Operating",
"raise PkgConfigError('pkg-config could not be found') def locate_ob(): \"\"\"Try use pkgconfig to locate",
"% i for i in self.include_dirs] print('- include_dirs: %s\\n- library_dirs: %s' % (self.include_dirs,",
"functionality for custom development. ''' class PkgConfigError(Exception): pass def pkgconfig(package, option): \"\"\"Wrapper around",
"and library directories. ' 'For example:\\n' ' python setup.py build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\\n' '",
"Public License (GPL)', 'Natural Language :: English', 'Operating System :: MacOS :: MacOS",
"option, package], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = p.communicate() if stderr: raise PkgConfigError('package",
"SWIG options and print a better error message.\"\"\" def finalize_options(self): # Setting include_dirs,",
"swig interface files into distribution from parent directory.\"\"\" def make_release_tree(self, base_dir, files): sdist.make_release_tree(self,",
"anyone to search, convert, analyze, or store data from molecular modeling, chemistry, solid-state",
"import sys from distutils.command.build import build from distutils.command.sdist import sdist from distutils.errors import",
"otherwise guess default location.\"\"\" try: version = pkgconfig('openbabel-2.0', '--modversion') if not StrictVersion(version) >=",
"be found by pkg-config' % package) return stdout.strip() except OSError: raise PkgConfigError('pkg-config could",
"'Programming Language :: C++', 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Bio-Informatics',",
"'GPL' if os.path.exists('README.rst'): long_description = open('README.rst').read() else: long_description = ''' The Open Babel",
"library_dirs = pkgconfig('openbabel-2.0', '--variable=libdir') print('Open Babel location automatically determined by pkg-config:') except PkgConfigError",
"os.path.exists('README.rst'): long_description = open('README.rst').read() else: long_description = ''' The Open Babel package provides",
"directory.\"\"\" def make_release_tree(self, base_dir, files): sdist.make_release_tree(self, base_dir, files) link = 'hard' if hasattr(os,",
"os import subprocess import sys from distutils.command.build import build from distutils.command.sdist import sdist",
"MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: OS Independent',",
"import build from distutils.command.sdist import sdist from distutils.errors import DistutilsExecError from distutils.version import",
"import build_ext from setuptools.command.install import install from setuptools import setup, Extension __author__ =",
":: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: GNU General",
"areas. It provides a broad base of chemical functionality for custom development. '''",
"wrapper to the Open Babel C++ chemistry library. Open Babel is a chemical",
"options to python setup.py build_ext. build_ext.finalize_options(self) include_dirs, library_dirs = locate_ob() self.include_dirs.append(include_dirs) self.library_dirs.append(library_dirs) self.swig_opts",
"# Setting include_dirs, library_dirs, swig_opts here instead of in Extension constructor allows them",
"distutils.version import StrictVersion from setuptools.command.build_ext import build_ext from setuptools.command.install import install from setuptools",
"Babel chemistry library', long_description=long_description, zip_safe=False, cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt, 'install': CustomInstall, 'sdist': CustomSdist},",
"p = subprocess.Popen(['pkg-config', option, package], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = p.communicate() if",
"= open('README.rst').read() else: long_description = ''' The Open Babel package provides a Python",
"stdout, stderr = p.communicate() if stderr: raise PkgConfigError('package %s could not be found",
"%s\\n- library_dirs: %s' % (self.include_dirs, self.library_dirs)) def swig_sources(self, sources, extension): try: return build_ext.swig_sources(self,",
"link=link) self.copy_file('../openbabel-python.i', base_dir, link=link) class CustomBuildExt(build_ext): \"\"\"Custom build_ext to set SWIG options and",
"a chemical toolbox designed to speak the many languages of chemical data. It's",
"command.\"\"\" def run(self): self.run_command('build_ext') install.run(self) class CustomSdist(sdist): \"\"\"Add swig interface files into distribution",
"def run(self): self.run_command('build_ext') install.run(self) class CustomSdist(sdist): \"\"\"Add swig interface files into distribution from",
"Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: GNU General Public",
"designed to speak the many languages of chemical data. It's an open, collaborative",
"= subprocess.Popen(['pkg-config', option, package], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = p.communicate() if stderr:",
"'--variable=libdir') print('Open Babel location automatically determined by pkg-config:') except PkgConfigError as e: print('Warning:",
"if hasattr(os, 'link') else None self.copy_file('../stereo.i', base_dir, link=link) self.copy_file('../openbabel-python.i', base_dir, link=link) class CustomBuildExt(build_ext):",
"long_description = open('README.rst').read() else: long_description = ''' The Open Babel package provides a",
"= locate_ob() self.include_dirs.append(include_dirs) self.library_dirs.append(library_dirs) self.swig_opts = ['-c++', '-small', '-O', '-templatereduce', '-naturalvar'] self.swig_opts +=",
"may need to manually specify the location of Open Babel include and library",
"compatible.' % version) include_dirs = pkgconfig('openbabel-2.0', '--variable=pkgincludedir') library_dirs = pkgconfig('openbabel-2.0', '--variable=libdir') print('Open Babel",
"print('Warning: %s.\\nGuessing Open Babel location:' % e) include_dirs = '/usr/local/include/openbabel-2.0' library_dirs = '/usr/local/lib'",
"pkg-config command line tool.\"\"\" try: p = subprocess.Popen(['pkg-config', option, package], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)",
"into distribution from parent directory.\"\"\" def make_release_tree(self, base_dir, files): sdist.make_release_tree(self, base_dir, files) link",
"description='Python interface to the Open Babel chemistry library', long_description=long_description, zip_safe=False, cmdclass={'build': CustomBuild, 'build_ext':",
"build_ext.swig_sources(self, sources, extension) except DistutilsExecError: print('\\nError: SWIG failed. Is Open Babel installed?\\n' 'You",
"to search, convert, analyze, or store data from molecular modeling, chemistry, solid-state materials,",
"py_modules=['openbabel', 'pybel'], ext_modules=[obextension], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console',",
"parent directory.\"\"\" def make_release_tree(self, base_dir, files): sdist.make_release_tree(self, base_dir, files) link = 'hard' if",
":: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System",
"= p.communicate() if stderr: raise PkgConfigError('package %s could not be found by pkg-config'",
"__version__ = '2.4.0' __license__ = 'GPL' if os.path.exists('README.rst'): long_description = open('README.rst').read() else: long_description",
"= pkgconfig('openbabel-2.0', '--variable=libdir') print('Open Babel location automatically determined by pkg-config:') except PkgConfigError as",
"provides a broad base of chemical functionality for custom development. ''' class PkgConfigError(Exception):",
"library_dirs = locate_ob() self.include_dirs.append(include_dirs) self.library_dirs.append(library_dirs) self.swig_opts = ['-c++', '-small', '-O', '-templatereduce', '-naturalvar'] self.swig_opts",
"CustomSdist(sdist): \"\"\"Add swig interface files into distribution from parent directory.\"\"\" def make_release_tree(self, base_dir,",
"CustomBuildExt(build_ext): \"\"\"Custom build_ext to set SWIG options and print a better error message.\"\"\"",
"'build_ext': CustomBuildExt, 'install': CustomInstall, 'sdist': CustomSdist}, py_modules=['openbabel', 'pybel'], ext_modules=[obextension], classifiers=[ 'Development Status ::",
"library_dirs class CustomBuild(build): \"\"\"Ensure build_ext runs first in build command.\"\"\" def run(self): self.run_command('build_ext')",
"Independent', 'Operating System :: POSIX', 'Operating System :: POSIX :: Linux', 'Operating System",
"of chemical data. It's an open, collaborative project allowing anyone to search, convert,",
"GNU General Public License (GPL)', 'Natural Language :: English', 'Operating System :: MacOS",
"install command.\"\"\" def run(self): self.run_command('build_ext') install.run(self) class CustomSdist(sdist): \"\"\"Add swig interface files into",
"chemical functionality for custom development. ''' class PkgConfigError(Exception): pass def pkgconfig(package, option): \"\"\"Wrapper",
"using -I and -L command line options to python setup.py build_ext. build_ext.finalize_options(self) include_dirs,",
"# overridden using -I and -L command line options to python setup.py build_ext.",
"or later is required. Your version (%s) may not be compatible.' % version)",
"System :: Microsoft :: Windows', 'Operating System :: OS Independent', 'Operating System ::",
"Babel installed?\\n' 'You may need to manually specify the location of Open Babel",
"from distutils.errors import DistutilsExecError from distutils.version import StrictVersion from setuptools.command.build_ext import build_ext from",
"could not be found by pkg-config' % package) return stdout.strip() except OSError: raise",
"sys.exit(1) obextension = Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel']) setup(name='openbabel', version=__version__, author=__author__, author_email=__email__, license=__license__, url='http://openbabel.org/', description='Python",
"return build_ext.swig_sources(self, sources, extension) except DistutilsExecError: print('\\nError: SWIG failed. Is Open Babel installed?\\n'",
"set SWIG options and print a better error message.\"\"\" def finalize_options(self): # Setting",
"to the Open Babel chemistry library', long_description=long_description, zip_safe=False, cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt, 'install':",
"<gh_stars>1-10 #!/usr/bin/env python import os import subprocess import sys from distutils.command.build import build",
"% e) include_dirs = '/usr/local/include/openbabel-2.0' library_dirs = '/usr/local/lib' return include_dirs, library_dirs class CustomBuild(build):",
"to set SWIG options and print a better error message.\"\"\" def finalize_options(self): #",
"''' The Open Babel package provides a Python wrapper to the Open Babel",
"System :: POSIX', 'Operating System :: POSIX :: Linux', 'Operating System :: Unix',",
"to manually specify the location of Open Babel include and library directories. '",
"python import os import subprocess import sys from distutils.command.build import build from distutils.command.sdist",
"Python wrapper to the Open Babel C++ chemistry library. Open Babel is a",
"Open Babel is a chemical toolbox designed to speak the many languages of",
"install.run(self) class CustomSdist(sdist): \"\"\"Add swig interface files into distribution from parent directory.\"\"\" def",
"in self.include_dirs] print('- include_dirs: %s\\n- library_dirs: %s' % (self.include_dirs, self.library_dirs)) def swig_sources(self, sources,",
"in Extension constructor allows them to be # overridden using -I and -L",
"be # overridden using -I and -L command line options to python setup.py",
"pkg-config' % package) return stdout.strip() except OSError: raise PkgConfigError('pkg-config could not be found')",
"error message.\"\"\" def finalize_options(self): # Setting include_dirs, library_dirs, swig_opts here instead of in",
"library', long_description=long_description, zip_safe=False, cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt, 'install': CustomInstall, 'sdist': CustomSdist}, py_modules=['openbabel', 'pybel'],",
"pkgconfig('openbabel-2.0', '--modversion') if not StrictVersion(version) >= StrictVersion('2.3.0'): print('Warning: Open Babel 2.3.0 or later",
"search, convert, analyze, or store data from molecular modeling, chemistry, solid-state materials, biochemistry,",
"def swig_sources(self, sources, extension): try: return build_ext.swig_sources(self, sources, extension) except DistutilsExecError: print('\\nError: SWIG",
"base of chemical functionality for custom development. ''' class PkgConfigError(Exception): pass def pkgconfig(package,",
"class CustomSdist(sdist): \"\"\"Add swig interface files into distribution from parent directory.\"\"\" def make_release_tree(self,",
"__license__ = 'GPL' if os.path.exists('README.rst'): long_description = open('README.rst').read() else: long_description = ''' The",
"and print a better error message.\"\"\" def finalize_options(self): # Setting include_dirs, library_dirs, swig_opts",
"import subprocess import sys from distutils.command.build import build from distutils.command.sdist import sdist from",
"i for i in self.include_dirs] print('- include_dirs: %s\\n- library_dirs: %s' % (self.include_dirs, self.library_dirs))",
"PkgConfigError(Exception): pass def pkgconfig(package, option): \"\"\"Wrapper around pkg-config command line tool.\"\"\" try: p",
"setuptools.command.build_ext import build_ext from setuptools.command.install import install from setuptools import setup, Extension __author__",
"class CustomBuildExt(build_ext): \"\"\"Custom build_ext to set SWIG options and print a better error",
"Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: GNU",
"open, collaborative project allowing anyone to search, convert, analyze, or store data from",
"setup.py build_ext. build_ext.finalize_options(self) include_dirs, library_dirs = locate_ob() self.include_dirs.append(include_dirs) self.library_dirs.append(library_dirs) self.swig_opts = ['-c++', '-small',",
"'Operating System :: Microsoft :: Windows', 'Operating System :: OS Independent', 'Operating System",
"around pkg-config command line tool.\"\"\" try: p = subprocess.Popen(['pkg-config', option, package], stdout=subprocess.PIPE, stderr=subprocess.PIPE,",
"PkgConfigError('package %s could not be found by pkg-config' % package) return stdout.strip() except",
"Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel']) setup(name='openbabel', version=__version__, author=__author__, author_email=__email__, license=__license__, url='http://openbabel.org/', description='Python interface to the",
"line tool.\"\"\" try: p = subprocess.Popen(['pkg-config', option, package], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr",
"Your version (%s) may not be compatible.' % version) include_dirs = pkgconfig('openbabel-2.0', '--variable=pkgincludedir')",
"library_dirs: %s' % (self.include_dirs, self.library_dirs)) def swig_sources(self, sources, extension): try: return build_ext.swig_sources(self, sources,",
"Open Babel chemistry library', long_description=long_description, zip_safe=False, cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt, 'install': CustomInstall, 'sdist':",
"if stderr: raise PkgConfigError('package %s could not be found by pkg-config' % package)",
"Babel location:' % e) include_dirs = '/usr/local/include/openbabel-2.0' library_dirs = '/usr/local/lib' return include_dirs, library_dirs",
"English', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft ::",
"version) include_dirs = pkgconfig('openbabel-2.0', '--variable=pkgincludedir') library_dirs = pkgconfig('openbabel-2.0', '--variable=libdir') print('Open Babel location automatically",
"def run(self): self.run_command('build_ext') build.run(self) class CustomInstall(install): \"\"\"Ensure build_ext runs first in install command.\"\"\"",
"class PkgConfigError(Exception): pass def pkgconfig(package, option): \"\"\"Wrapper around pkg-config command line tool.\"\"\" try:",
"'Intended Audience :: Science/Research', 'License :: OSI Approved :: GNU General Public License",
":: GNU General Public License (GPL)', 'Natural Language :: English', 'Operating System ::",
"an open, collaborative project allowing anyone to search, convert, analyze, or store data",
"print('- include_dirs: %s\\n- library_dirs: %s' % (self.include_dirs, self.library_dirs)) def swig_sources(self, sources, extension): try:",
"= ''' The Open Babel package provides a Python wrapper to the Open",
"'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved ::",
"speak the many languages of chemical data. It's an open, collaborative project allowing",
":: Other Environment', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License ::",
"'Topic :: Scientific/Engineering :: Chemistry', 'Topic :: Software Development :: Libraries' ] )",
"base_dir, link=link) class CustomBuildExt(build_ext): \"\"\"Custom build_ext to set SWIG options and print a",
"location of Open Babel include and library directories. ' 'For example:\\n' ' python",
"author_email=__email__, license=__license__, url='http://openbabel.org/', description='Python interface to the Open Babel chemistry library', long_description=long_description, zip_safe=False,",
"from setuptools import setup, Extension __author__ = '<NAME>' __email__ = '<EMAIL>' __version__ =",
"custom development. ''' class PkgConfigError(Exception): pass def pkgconfig(package, option): \"\"\"Wrapper around pkg-config command",
"base_dir, link=link) self.copy_file('../openbabel-python.i', base_dir, link=link) class CustomBuildExt(build_ext): \"\"\"Custom build_ext to set SWIG options",
"library directories. ' 'For example:\\n' ' python setup.py build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\\n' ' python",
"Language :: Python', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'Topic :: Scientific/Engineering :: Chemistry',",
"'-O', '-templatereduce', '-naturalvar'] self.swig_opts += ['-I%s' % i for i in self.include_dirs] print('-",
"= '2.4.0' __license__ = 'GPL' if os.path.exists('README.rst'): long_description = open('README.rst').read() else: long_description =",
"pkg-config:') except PkgConfigError as e: print('Warning: %s.\\nGuessing Open Babel location:' % e) include_dirs",
"'Operating System :: POSIX :: Linux', 'Operating System :: Unix', 'Programming Language ::",
"package) return stdout.strip() except OSError: raise PkgConfigError('pkg-config could not be found') def locate_ob():",
"a Python wrapper to the Open Babel C++ chemistry library. Open Babel is",
"build command.\"\"\" def run(self): self.run_command('build_ext') build.run(self) class CustomInstall(install): \"\"\"Ensure build_ext runs first in",
":: Linux', 'Operating System :: Unix', 'Programming Language :: C++', 'Programming Language ::",
"swig_opts here instead of in Extension constructor allows them to be # overridden",
"library_dirs = '/usr/local/lib' return include_dirs, library_dirs class CustomBuild(build): \"\"\"Ensure build_ext runs first in",
"= '<EMAIL>' __version__ = '2.4.0' __license__ = 'GPL' if os.path.exists('README.rst'): long_description = open('README.rst').read()",
"guess default location.\"\"\" try: version = pkgconfig('openbabel-2.0', '--modversion') if not StrictVersion(version) >= StrictVersion('2.3.0'):",
"pass def pkgconfig(package, option): \"\"\"Wrapper around pkg-config command line tool.\"\"\" try: p =",
"setup.py build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\\n' ' python setup.py install') sys.exit(1) obextension = Extension('_openbabel', ['openbabel-python.i'],",
"'Natural Language :: English', 'Operating System :: MacOS :: MacOS X', 'Operating System",
"System :: OS Independent', 'Operating System :: POSIX', 'Operating System :: POSIX ::",
"__author__ = '<NAME>' __email__ = '<EMAIL>' __version__ = '2.4.0' __license__ = 'GPL' if",
"long_description = ''' The Open Babel package provides a Python wrapper to the",
"interface files into distribution from parent directory.\"\"\" def make_release_tree(self, base_dir, files): sdist.make_release_tree(self, base_dir,",
"instead of in Extension constructor allows them to be # overridden using -I",
"classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Other",
"version=__version__, author=__author__, author_email=__email__, license=__license__, url='http://openbabel.org/', description='Python interface to the Open Babel chemistry library',",
"subprocess import sys from distutils.command.build import build from distutils.command.sdist import sdist from distutils.errors",
"'/usr/local/lib' return include_dirs, library_dirs class CustomBuild(build): \"\"\"Ensure build_ext runs first in build command.\"\"\"",
"may not be compatible.' % version) include_dirs = pkgconfig('openbabel-2.0', '--variable=pkgincludedir') library_dirs = pkgconfig('openbabel-2.0',",
"StrictVersion(version) >= StrictVersion('2.3.0'): print('Warning: Open Babel 2.3.0 or later is required. Your version",
"a better error message.\"\"\" def finalize_options(self): # Setting include_dirs, library_dirs, swig_opts here instead",
"class CustomBuild(build): \"\"\"Ensure build_ext runs first in build command.\"\"\" def run(self): self.run_command('build_ext') build.run(self)",
"PkgConfigError as e: print('Warning: %s.\\nGuessing Open Babel location:' % e) include_dirs = '/usr/local/include/openbabel-2.0'",
"required. Your version (%s) may not be compatible.' % version) include_dirs = pkgconfig('openbabel-2.0',",
"for custom development. ''' class PkgConfigError(Exception): pass def pkgconfig(package, option): \"\"\"Wrapper around pkg-config",
"' 'For example:\\n' ' python setup.py build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\\n' ' python setup.py install')",
"'-naturalvar'] self.swig_opts += ['-I%s' % i for i in self.include_dirs] print('- include_dirs: %s\\n-",
"files into distribution from parent directory.\"\"\" def make_release_tree(self, base_dir, files): sdist.make_release_tree(self, base_dir, files)",
"be found') def locate_ob(): \"\"\"Try use pkgconfig to locate Open Babel, otherwise guess",
"make_release_tree(self, base_dir, files): sdist.make_release_tree(self, base_dir, files) link = 'hard' if hasattr(os, 'link') else",
"build.run(self) class CustomInstall(install): \"\"\"Ensure build_ext runs first in install command.\"\"\" def run(self): self.run_command('build_ext')",
"Language :: C++', 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'Topic",
"Babel package provides a Python wrapper to the Open Babel C++ chemistry library.",
"else: long_description = ''' The Open Babel package provides a Python wrapper to",
"if os.path.exists('README.rst'): long_description = open('README.rst').read() else: long_description = ''' The Open Babel package",
"'link') else None self.copy_file('../stereo.i', base_dir, link=link) self.copy_file('../openbabel-python.i', base_dir, link=link) class CustomBuildExt(build_ext): \"\"\"Custom build_ext",
"modeling, chemistry, solid-state materials, biochemistry, or related areas. It provides a broad base",
"else None self.copy_file('../stereo.i', base_dir, link=link) self.copy_file('../openbabel-python.i', base_dir, link=link) class CustomBuildExt(build_ext): \"\"\"Custom build_ext to",
":: OSI Approved :: GNU General Public License (GPL)', 'Natural Language :: English',",
"by pkg-config' % package) return stdout.strip() except OSError: raise PkgConfigError('pkg-config could not be",
"StrictVersion('2.3.0'): print('Warning: Open Babel 2.3.0 or later is required. Your version (%s) may",
"CustomInstall(install): \"\"\"Ensure build_ext runs first in install command.\"\"\" def run(self): self.run_command('build_ext') install.run(self) class",
"subprocess.Popen(['pkg-config', option, package], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = p.communicate() if stderr: raise",
"5 - Production/Stable', 'Environment :: Console', 'Environment :: Other Environment', 'Intended Audience ::",
"\"\"\"Try use pkgconfig to locate Open Babel, otherwise guess default location.\"\"\" try: version",
"\"\"\"Wrapper around pkg-config command line tool.\"\"\" try: p = subprocess.Popen(['pkg-config', option, package], stdout=subprocess.PIPE,",
"to python setup.py build_ext. build_ext.finalize_options(self) include_dirs, library_dirs = locate_ob() self.include_dirs.append(include_dirs) self.library_dirs.append(library_dirs) self.swig_opts =",
"Is Open Babel installed?\\n' 'You may need to manually specify the location of",
"p.communicate() if stderr: raise PkgConfigError('package %s could not be found by pkg-config' %",
"Language :: English', 'Operating System :: MacOS :: MacOS X', 'Operating System ::",
"import install from setuptools import setup, Extension __author__ = '<NAME>' __email__ = '<EMAIL>'",
"directories. ' 'For example:\\n' ' python setup.py build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\\n' ' python setup.py",
"Babel is a chemical toolbox designed to speak the many languages of chemical",
"= Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel']) setup(name='openbabel', version=__version__, author=__author__, author_email=__email__, license=__license__, url='http://openbabel.org/', description='Python interface to",
"Science/Research', 'License :: OSI Approved :: GNU General Public License (GPL)', 'Natural Language",
"package], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = p.communicate() if stderr: raise PkgConfigError('package %s",
"Babel 2.3.0 or later is required. Your version (%s) may not be compatible.'",
":: Microsoft :: Windows', 'Operating System :: OS Independent', 'Operating System :: POSIX',",
"\"\"\"Add swig interface files into distribution from parent directory.\"\"\" def make_release_tree(self, base_dir, files):",
"locate Open Babel, otherwise guess default location.\"\"\" try: version = pkgconfig('openbabel-2.0', '--modversion') if",
"Extension __author__ = '<NAME>' __email__ = '<EMAIL>' __version__ = '2.4.0' __license__ = 'GPL'",
"'/usr/local/include/openbabel-2.0' library_dirs = '/usr/local/lib' return include_dirs, library_dirs class CustomBuild(build): \"\"\"Ensure build_ext runs first",
"% (self.include_dirs, self.library_dirs)) def swig_sources(self, sources, extension): try: return build_ext.swig_sources(self, sources, extension) except",
"include_dirs = '/usr/local/include/openbabel-2.0' library_dirs = '/usr/local/lib' return include_dirs, library_dirs class CustomBuild(build): \"\"\"Ensure build_ext",
"failed. Is Open Babel installed?\\n' 'You may need to manually specify the location",
"Setting include_dirs, library_dirs, swig_opts here instead of in Extension constructor allows them to",
"default location.\"\"\" try: version = pkgconfig('openbabel-2.0', '--modversion') if not StrictVersion(version) >= StrictVersion('2.3.0'): print('Warning:",
"Babel location automatically determined by pkg-config:') except PkgConfigError as e: print('Warning: %s.\\nGuessing Open",
"author=__author__, author_email=__email__, license=__license__, url='http://openbabel.org/', description='Python interface to the Open Babel chemistry library', long_description=long_description,",
"C++', 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'Topic :: Scientific/Engineering",
"locate_ob(): \"\"\"Try use pkgconfig to locate Open Babel, otherwise guess default location.\"\"\" try:",
"raise PkgConfigError('package %s could not be found by pkg-config' % package) return stdout.strip()",
"System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating",
"Open Babel installed?\\n' 'You may need to manually specify the location of Open",
"OSI Approved :: GNU General Public License (GPL)', 'Natural Language :: English', 'Operating",
"print('Open Babel location automatically determined by pkg-config:') except PkgConfigError as e: print('Warning: %s.\\nGuessing",
"Environment', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved",
"here instead of in Extension constructor allows them to be # overridden using",
":: Console', 'Environment :: Other Environment', 'Intended Audience :: Education', 'Intended Audience ::",
"X', 'Operating System :: Microsoft :: Windows', 'Operating System :: OS Independent', 'Operating",
"collaborative project allowing anyone to search, convert, analyze, or store data from molecular",
"option): \"\"\"Wrapper around pkg-config command line tool.\"\"\" try: p = subprocess.Popen(['pkg-config', option, package],",
"not be found by pkg-config' % package) return stdout.strip() except OSError: raise PkgConfigError('pkg-config",
"version (%s) may not be compatible.' % version) include_dirs = pkgconfig('openbabel-2.0', '--variable=pkgincludedir') library_dirs",
"zip_safe=False, cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt, 'install': CustomInstall, 'sdist': CustomSdist}, py_modules=['openbabel', 'pybel'], ext_modules=[obextension], classifiers=[",
"to speak the many languages of chemical data. It's an open, collaborative project",
"SWIG failed. Is Open Babel installed?\\n' 'You may need to manually specify the",
"except PkgConfigError as e: print('Warning: %s.\\nGuessing Open Babel location:' % e) include_dirs =",
"C++ chemistry library. Open Babel is a chemical toolbox designed to speak the",
"toolbox designed to speak the many languages of chemical data. It's an open,",
"e) include_dirs = '/usr/local/include/openbabel-2.0' library_dirs = '/usr/local/lib' return include_dirs, library_dirs class CustomBuild(build): \"\"\"Ensure",
"data. It's an open, collaborative project allowing anyone to search, convert, analyze, or",
"self.copy_file('../stereo.i', base_dir, link=link) self.copy_file('../openbabel-python.i', base_dir, link=link) class CustomBuildExt(build_ext): \"\"\"Custom build_ext to set SWIG",
"them to be # overridden using -I and -L command line options to",
":: Science/Research', 'License :: OSI Approved :: GNU General Public License (GPL)', 'Natural",
"'License :: OSI Approved :: GNU General Public License (GPL)', 'Natural Language ::",
"by pkg-config:') except PkgConfigError as e: print('Warning: %s.\\nGuessing Open Babel location:' % e)",
"base_dir, files): sdist.make_release_tree(self, base_dir, files) link = 'hard' if hasattr(os, 'link') else None",
"#!/usr/bin/env python import os import subprocess import sys from distutils.command.build import build from",
"''' class PkgConfigError(Exception): pass def pkgconfig(package, option): \"\"\"Wrapper around pkg-config command line tool.\"\"\"",
"python setup.py build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\\n' ' python setup.py install') sys.exit(1) obextension = Extension('_openbabel',",
"version = pkgconfig('openbabel-2.0', '--modversion') if not StrictVersion(version) >= StrictVersion('2.3.0'): print('Warning: Open Babel 2.3.0",
"install from setuptools import setup, Extension __author__ = '<NAME>' __email__ = '<EMAIL>' __version__",
"specify the location of Open Babel include and library directories. ' 'For example:\\n'",
"the location of Open Babel include and library directories. ' 'For example:\\n' '",
"self.swig_opts += ['-I%s' % i for i in self.include_dirs] print('- include_dirs: %s\\n- library_dirs:",
":: English', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft",
"distribution from parent directory.\"\"\" def make_release_tree(self, base_dir, files): sdist.make_release_tree(self, base_dir, files) link =",
"include_dirs = pkgconfig('openbabel-2.0', '--variable=pkgincludedir') library_dirs = pkgconfig('openbabel-2.0', '--variable=libdir') print('Open Babel location automatically determined",
"return include_dirs, library_dirs class CustomBuild(build): \"\"\"Ensure build_ext runs first in build command.\"\"\" def",
"library. Open Babel is a chemical toolbox designed to speak the many languages",
"['-I%s' % i for i in self.include_dirs] print('- include_dirs: %s\\n- library_dirs: %s' %",
"as e: print('Warning: %s.\\nGuessing Open Babel location:' % e) include_dirs = '/usr/local/include/openbabel-2.0' library_dirs",
"biochemistry, or related areas. It provides a broad base of chemical functionality for",
"extension): try: return build_ext.swig_sources(self, sources, extension) except DistutilsExecError: print('\\nError: SWIG failed. Is Open",
"extension) except DistutilsExecError: print('\\nError: SWIG failed. Is Open Babel installed?\\n' 'You may need",
"a broad base of chemical functionality for custom development. ''' class PkgConfigError(Exception): pass",
"need to manually specify the location of Open Babel include and library directories.",
"Production/Stable', 'Environment :: Console', 'Environment :: Other Environment', 'Intended Audience :: Education', 'Intended",
"message.\"\"\" def finalize_options(self): # Setting include_dirs, library_dirs, swig_opts here instead of in Extension",
"determined by pkg-config:') except PkgConfigError as e: print('Warning: %s.\\nGuessing Open Babel location:' %",
"= '/usr/local/include/openbabel-2.0' library_dirs = '/usr/local/lib' return include_dirs, library_dirs class CustomBuild(build): \"\"\"Ensure build_ext runs",
"= '/usr/local/lib' return include_dirs, library_dirs class CustomBuild(build): \"\"\"Ensure build_ext runs first in build",
"OS Independent', 'Operating System :: POSIX', 'Operating System :: POSIX :: Linux', 'Operating",
"to the Open Babel C++ chemistry library. Open Babel is a chemical toolbox",
"finalize_options(self): # Setting include_dirs, library_dirs, swig_opts here instead of in Extension constructor allows",
"i in self.include_dirs] print('- include_dirs: %s\\n- library_dirs: %s' % (self.include_dirs, self.library_dirs)) def swig_sources(self,",
"try: p = subprocess.Popen(['pkg-config', option, package], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = p.communicate()",
"'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Other Environment',",
"from molecular modeling, chemistry, solid-state materials, biochemistry, or related areas. It provides a",
"pkgconfig to locate Open Babel, otherwise guess default location.\"\"\" try: version = pkgconfig('openbabel-2.0',",
"command line options to python setup.py build_ext. build_ext.finalize_options(self) include_dirs, library_dirs = locate_ob() self.include_dirs.append(include_dirs)",
"manually specify the location of Open Babel include and library directories. ' 'For",
":: OS Independent', 'Operating System :: POSIX', 'Operating System :: POSIX :: Linux',",
"runs first in build command.\"\"\" def run(self): self.run_command('build_ext') build.run(self) class CustomInstall(install): \"\"\"Ensure build_ext",
"example:\\n' ' python setup.py build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\\n' ' python setup.py install') sys.exit(1) obextension",
"except DistutilsExecError: print('\\nError: SWIG failed. Is Open Babel installed?\\n' 'You may need to",
"-I/usr/local/include/openbabel-2.0 -L/usr/local/lib\\n' ' python setup.py install') sys.exit(1) obextension = Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel']) setup(name='openbabel',",
"command.\"\"\" def run(self): self.run_command('build_ext') build.run(self) class CustomInstall(install): \"\"\"Ensure build_ext runs first in install",
"Other Environment', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI",
"'Operating System :: OS Independent', 'Operating System :: POSIX', 'Operating System :: POSIX",
"chemical toolbox designed to speak the many languages of chemical data. It's an",
"= 'hard' if hasattr(os, 'link') else None self.copy_file('../stereo.i', base_dir, link=link) self.copy_file('../openbabel-python.i', base_dir, link=link)",
"include_dirs, library_dirs class CustomBuild(build): \"\"\"Ensure build_ext runs first in build command.\"\"\" def run(self):",
"command line tool.\"\"\" try: p = subprocess.Popen(['pkg-config', option, package], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout,",
"run(self): self.run_command('build_ext') install.run(self) class CustomSdist(sdist): \"\"\"Add swig interface files into distribution from parent",
"pkgconfig('openbabel-2.0', '--variable=pkgincludedir') library_dirs = pkgconfig('openbabel-2.0', '--variable=libdir') print('Open Babel location automatically determined by pkg-config:')",
"import setup, Extension __author__ = '<NAME>' __email__ = '<EMAIL>' __version__ = '2.4.0' __license__",
"base_dir, files) link = 'hard' if hasattr(os, 'link') else None self.copy_file('../stereo.i', base_dir, link=link)",
"to be # overridden using -I and -L command line options to python",
"pkgconfig(package, option): \"\"\"Wrapper around pkg-config command line tool.\"\"\" try: p = subprocess.Popen(['pkg-config', option,",
"['openbabel-python.i'], libraries=['openbabel']) setup(name='openbabel', version=__version__, author=__author__, author_email=__email__, license=__license__, url='http://openbabel.org/', description='Python interface to the Open",
"Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Other Environment', 'Intended",
"System :: Unix', 'Programming Language :: C++', 'Programming Language :: Python', 'Topic ::",
"Babel, otherwise guess default location.\"\"\" try: version = pkgconfig('openbabel-2.0', '--modversion') if not StrictVersion(version)",
"'hard' if hasattr(os, 'link') else None self.copy_file('../stereo.i', base_dir, link=link) self.copy_file('../openbabel-python.i', base_dir, link=link) class",
"broad base of chemical functionality for custom development. ''' class PkgConfigError(Exception): pass def",
"(self.include_dirs, self.library_dirs)) def swig_sources(self, sources, extension): try: return build_ext.swig_sources(self, sources, extension) except DistutilsExecError:",
"could not be found') def locate_ob(): \"\"\"Try use pkgconfig to locate Open Babel,",
"use pkgconfig to locate Open Babel, otherwise guess default location.\"\"\" try: version =",
"from distutils.command.build import build from distutils.command.sdist import sdist from distutils.errors import DistutilsExecError from",
"def make_release_tree(self, base_dir, files): sdist.make_release_tree(self, base_dir, files) link = 'hard' if hasattr(os, 'link')",
"overridden using -I and -L command line options to python setup.py build_ext. build_ext.finalize_options(self)",
"for i in self.include_dirs] print('- include_dirs: %s\\n- library_dirs: %s' % (self.include_dirs, self.library_dirs)) def",
"if not StrictVersion(version) >= StrictVersion('2.3.0'): print('Warning: Open Babel 2.3.0 or later is required.",
"Open Babel package provides a Python wrapper to the Open Babel C++ chemistry",
"or related areas. It provides a broad base of chemical functionality for custom",
"' python setup.py build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\\n' ' python setup.py install') sys.exit(1) obextension =",
"open('README.rst').read() else: long_description = ''' The Open Babel package provides a Python wrapper",
"sdist from distutils.errors import DistutilsExecError from distutils.version import StrictVersion from setuptools.command.build_ext import build_ext",
"setuptools.command.install import install from setuptools import setup, Extension __author__ = '<NAME>' __email__ =",
"build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\\n' ' python setup.py install') sys.exit(1) obextension = Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel'])",
"= 'GPL' if os.path.exists('README.rst'): long_description = open('README.rst').read() else: long_description = ''' The Open",
"location automatically determined by pkg-config:') except PkgConfigError as e: print('Warning: %s.\\nGuessing Open Babel",
"self.run_command('build_ext') build.run(self) class CustomInstall(install): \"\"\"Ensure build_ext runs first in install command.\"\"\" def run(self):",
"the many languages of chemical data. It's an open, collaborative project allowing anyone",
"files): sdist.make_release_tree(self, base_dir, files) link = 'hard' if hasattr(os, 'link') else None self.copy_file('../stereo.i',",
"= '<NAME>' __email__ = '<EMAIL>' __version__ = '2.4.0' __license__ = 'GPL' if os.path.exists('README.rst'):",
"def locate_ob(): \"\"\"Try use pkgconfig to locate Open Babel, otherwise guess default location.\"\"\"",
"development. ''' class PkgConfigError(Exception): pass def pkgconfig(package, option): \"\"\"Wrapper around pkg-config command line",
"long_description=long_description, zip_safe=False, cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt, 'install': CustomInstall, 'sdist': CustomSdist}, py_modules=['openbabel', 'pybel'], ext_modules=[obextension],",
"library_dirs, swig_opts here instead of in Extension constructor allows them to be #",
"python setup.py install') sys.exit(1) obextension = Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel']) setup(name='openbabel', version=__version__, author=__author__, author_email=__email__,",
"setuptools import setup, Extension __author__ = '<NAME>' __email__ = '<EMAIL>' __version__ = '2.4.0'",
"solid-state materials, biochemistry, or related areas. It provides a broad base of chemical",
"materials, biochemistry, or related areas. It provides a broad base of chemical functionality",
"location.\"\"\" try: version = pkgconfig('openbabel-2.0', '--modversion') if not StrictVersion(version) >= StrictVersion('2.3.0'): print('Warning: Open",
"'For example:\\n' ' python setup.py build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\\n' ' python setup.py install') sys.exit(1)",
"in install command.\"\"\" def run(self): self.run_command('build_ext') install.run(self) class CustomSdist(sdist): \"\"\"Add swig interface files",
"provides a Python wrapper to the Open Babel C++ chemistry library. Open Babel",
"'-small', '-O', '-templatereduce', '-naturalvar'] self.swig_opts += ['-I%s' % i for i in self.include_dirs]",
"link = 'hard' if hasattr(os, 'link') else None self.copy_file('../stereo.i', base_dir, link=link) self.copy_file('../openbabel-python.i', base_dir,",
"MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System ::",
"None self.copy_file('../stereo.i', base_dir, link=link) self.copy_file('../openbabel-python.i', base_dir, link=link) class CustomBuildExt(build_ext): \"\"\"Custom build_ext to set",
"allowing anyone to search, convert, analyze, or store data from molecular modeling, chemistry,",
"'<NAME>' __email__ = '<EMAIL>' __version__ = '2.4.0' __license__ = 'GPL' if os.path.exists('README.rst'): long_description",
"automatically determined by pkg-config:') except PkgConfigError as e: print('Warning: %s.\\nGuessing Open Babel location:'",
"'--variable=pkgincludedir') library_dirs = pkgconfig('openbabel-2.0', '--variable=libdir') print('Open Babel location automatically determined by pkg-config:') except",
"It provides a broad base of chemical functionality for custom development. ''' class",
"installed?\\n' 'You may need to manually specify the location of Open Babel include",
"CustomSdist}, py_modules=['openbabel', 'pybel'], ext_modules=[obextension], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment ::",
"'2.4.0' __license__ = 'GPL' if os.path.exists('README.rst'): long_description = open('README.rst').read() else: long_description = '''",
"self.include_dirs.append(include_dirs) self.library_dirs.append(library_dirs) self.swig_opts = ['-c++', '-small', '-O', '-templatereduce', '-naturalvar'] self.swig_opts += ['-I%s' %",
"self.swig_opts = ['-c++', '-small', '-O', '-templatereduce', '-naturalvar'] self.swig_opts += ['-I%s' % i for",
"'-templatereduce', '-naturalvar'] self.swig_opts += ['-I%s' % i for i in self.include_dirs] print('- include_dirs:",
"include_dirs, library_dirs = locate_ob() self.include_dirs.append(include_dirs) self.library_dirs.append(library_dirs) self.swig_opts = ['-c++', '-small', '-O', '-templatereduce', '-naturalvar']",
"%s could not be found by pkg-config' % package) return stdout.strip() except OSError:",
"self.copy_file('../openbabel-python.i', base_dir, link=link) class CustomBuildExt(build_ext): \"\"\"Custom build_ext to set SWIG options and print",
"+= ['-I%s' % i for i in self.include_dirs] print('- include_dirs: %s\\n- library_dirs: %s'",
"\"\"\"Ensure build_ext runs first in install command.\"\"\" def run(self): self.run_command('build_ext') install.run(self) class CustomSdist(sdist):",
"CustomInstall, 'sdist': CustomSdist}, py_modules=['openbabel', 'pybel'], ext_modules=[obextension], classifiers=[ 'Development Status :: 5 - Production/Stable',",
"constructor allows them to be # overridden using -I and -L command line",
"Microsoft :: Windows', 'Operating System :: OS Independent', 'Operating System :: POSIX', 'Operating",
"build_ext from setuptools.command.install import install from setuptools import setup, Extension __author__ = '<NAME>'",
"first in install command.\"\"\" def run(self): self.run_command('build_ext') install.run(self) class CustomSdist(sdist): \"\"\"Add swig interface",
"CustomBuild, 'build_ext': CustomBuildExt, 'install': CustomInstall, 'sdist': CustomSdist}, py_modules=['openbabel', 'pybel'], ext_modules=[obextension], classifiers=[ 'Development Status",
"\"\"\"Ensure build_ext runs first in build command.\"\"\" def run(self): self.run_command('build_ext') build.run(self) class CustomInstall(install):",
"'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'Topic :: Scientific/Engineering ::",
"It's an open, collaborative project allowing anyone to search, convert, analyze, or store",
"__email__ = '<EMAIL>' __version__ = '2.4.0' __license__ = 'GPL' if os.path.exists('README.rst'): long_description =",
"from distutils.version import StrictVersion from setuptools.command.build_ext import build_ext from setuptools.command.install import install from",
"%s' % (self.include_dirs, self.library_dirs)) def swig_sources(self, sources, extension): try: return build_ext.swig_sources(self, sources, extension)",
"from setuptools.command.build_ext import build_ext from setuptools.command.install import install from setuptools import setup, Extension",
"pkgconfig('openbabel-2.0', '--variable=libdir') print('Open Babel location automatically determined by pkg-config:') except PkgConfigError as e:",
"Console', 'Environment :: Other Environment', 'Intended Audience :: Education', 'Intended Audience :: Science/Research',",
":: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Other Environment', 'Intended Audience",
"= pkgconfig('openbabel-2.0', '--variable=pkgincludedir') library_dirs = pkgconfig('openbabel-2.0', '--variable=libdir') print('Open Babel location automatically determined by",
"not be found') def locate_ob(): \"\"\"Try use pkgconfig to locate Open Babel, otherwise",
"cmdclass={'build': CustomBuild, 'build_ext': CustomBuildExt, 'install': CustomInstall, 'sdist': CustomSdist}, py_modules=['openbabel', 'pybel'], ext_modules=[obextension], classifiers=[ 'Development",
"line options to python setup.py build_ext. build_ext.finalize_options(self) include_dirs, library_dirs = locate_ob() self.include_dirs.append(include_dirs) self.library_dirs.append(library_dirs)",
"analyze, or store data from molecular modeling, chemistry, solid-state materials, biochemistry, or related",
"2.3.0 or later is required. Your version (%s) may not be compatible.' %",
"'pybel'], ext_modules=[obextension], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment",
"= ['-c++', '-small', '-O', '-templatereduce', '-naturalvar'] self.swig_opts += ['-I%s' % i for i",
"molecular modeling, chemistry, solid-state materials, biochemistry, or related areas. It provides a broad",
"link=link) class CustomBuildExt(build_ext): \"\"\"Custom build_ext to set SWIG options and print a better",
"stdout.strip() except OSError: raise PkgConfigError('pkg-config could not be found') def locate_ob(): \"\"\"Try use",
">= StrictVersion('2.3.0'): print('Warning: Open Babel 2.3.0 or later is required. Your version (%s)",
"include_dirs: %s\\n- library_dirs: %s' % (self.include_dirs, self.library_dirs)) def swig_sources(self, sources, extension): try: return",
"include and library directories. ' 'For example:\\n' ' python setup.py build_ext -I/usr/local/include/openbabel-2.0 -L/usr/local/lib\\n'",
"chemistry library. Open Babel is a chemical toolbox designed to speak the many",
"except OSError: raise PkgConfigError('pkg-config could not be found') def locate_ob(): \"\"\"Try use pkgconfig",
"be compatible.' % version) include_dirs = pkgconfig('openbabel-2.0', '--variable=pkgincludedir') library_dirs = pkgconfig('openbabel-2.0', '--variable=libdir') print('Open",
"Open Babel, otherwise guess default location.\"\"\" try: version = pkgconfig('openbabel-2.0', '--modversion') if not",
"files) link = 'hard' if hasattr(os, 'link') else None self.copy_file('../stereo.i', base_dir, link=link) self.copy_file('../openbabel-python.i',",
"locate_ob() self.include_dirs.append(include_dirs) self.library_dirs.append(library_dirs) self.swig_opts = ['-c++', '-small', '-O', '-templatereduce', '-naturalvar'] self.swig_opts += ['-I%s'",
"and -L command line options to python setup.py build_ext. build_ext.finalize_options(self) include_dirs, library_dirs =",
"or store data from molecular modeling, chemistry, solid-state materials, biochemistry, or related areas.",
"Unix', 'Programming Language :: C++', 'Programming Language :: Python', 'Topic :: Scientific/Engineering ::",
"Babel C++ chemistry library. Open Babel is a chemical toolbox designed to speak",
"Approved :: GNU General Public License (GPL)', 'Natural Language :: English', 'Operating System",
"Open Babel C++ chemistry library. Open Babel is a chemical toolbox designed to",
"not StrictVersion(version) >= StrictVersion('2.3.0'): print('Warning: Open Babel 2.3.0 or later is required. Your",
"stderr = p.communicate() if stderr: raise PkgConfigError('package %s could not be found by",
":: POSIX :: Linux', 'Operating System :: Unix', 'Programming Language :: C++', 'Programming",
"chemical data. It's an open, collaborative project allowing anyone to search, convert, analyze,",
"build_ext to set SWIG options and print a better error message.\"\"\" def finalize_options(self):",
"DistutilsExecError: print('\\nError: SWIG failed. Is Open Babel installed?\\n' 'You may need to manually",
"' python setup.py install') sys.exit(1) obextension = Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel']) setup(name='openbabel', version=__version__, author=__author__,",
"self.library_dirs)) def swig_sources(self, sources, extension): try: return build_ext.swig_sources(self, sources, extension) except DistutilsExecError: print('\\nError:",
"python setup.py build_ext. build_ext.finalize_options(self) include_dirs, library_dirs = locate_ob() self.include_dirs.append(include_dirs) self.library_dirs.append(library_dirs) self.swig_opts = ['-c++',",
"StrictVersion from setuptools.command.build_ext import build_ext from setuptools.command.install import install from setuptools import setup,",
"Scientific/Engineering :: Bio-Informatics', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic :: Software Development ::",
"import DistutilsExecError from distutils.version import StrictVersion from setuptools.command.build_ext import build_ext from setuptools.command.install import",
"Bio-Informatics', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic :: Software Development :: Libraries' ]",
"'Operating System :: Unix', 'Programming Language :: C++', 'Programming Language :: Python', 'Topic",
"swig_sources(self, sources, extension): try: return build_ext.swig_sources(self, sources, extension) except DistutilsExecError: print('\\nError: SWIG failed.",
"of Open Babel include and library directories. ' 'For example:\\n' ' python setup.py",
"e: print('Warning: %s.\\nGuessing Open Babel location:' % e) include_dirs = '/usr/local/include/openbabel-2.0' library_dirs =",
"location:' % e) include_dirs = '/usr/local/include/openbabel-2.0' library_dirs = '/usr/local/lib' return include_dirs, library_dirs class",
"is required. Your version (%s) may not be compatible.' % version) include_dirs =",
"include_dirs, library_dirs, swig_opts here instead of in Extension constructor allows them to be",
"stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = p.communicate() if stderr: raise PkgConfigError('package %s could",
"distutils.errors import DistutilsExecError from distutils.version import StrictVersion from setuptools.command.build_ext import build_ext from setuptools.command.install",
"-L command line options to python setup.py build_ext. build_ext.finalize_options(self) include_dirs, library_dirs = locate_ob()",
"in build command.\"\"\" def run(self): self.run_command('build_ext') build.run(self) class CustomInstall(install): \"\"\"Ensure build_ext runs first",
"setup(name='openbabel', version=__version__, author=__author__, author_email=__email__, license=__license__, url='http://openbabel.org/', description='Python interface to the Open Babel chemistry",
"'Environment :: Other Environment', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License",
"of in Extension constructor allows them to be # overridden using -I and",
"the Open Babel C++ chemistry library. Open Babel is a chemical toolbox designed",
"Linux', 'Operating System :: Unix', 'Programming Language :: C++', 'Programming Language :: Python',",
"runs first in install command.\"\"\" def run(self): self.run_command('build_ext') install.run(self) class CustomSdist(sdist): \"\"\"Add swig",
"CustomBuildExt, 'install': CustomInstall, 'sdist': CustomSdist}, py_modules=['openbabel', 'pybel'], ext_modules=[obextension], classifiers=[ 'Development Status :: 5",
"stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = p.communicate() if stderr: raise PkgConfigError('package %s could not",
"% version) include_dirs = pkgconfig('openbabel-2.0', '--variable=pkgincludedir') library_dirs = pkgconfig('openbabel-2.0', '--variable=libdir') print('Open Babel location",
"License (GPL)', 'Natural Language :: English', 'Operating System :: MacOS :: MacOS X',",
"project allowing anyone to search, convert, analyze, or store data from molecular modeling,",
"Open Babel include and library directories. ' 'For example:\\n' ' python setup.py build_ext",
"'<EMAIL>' __version__ = '2.4.0' __license__ = 'GPL' if os.path.exists('README.rst'): long_description = open('README.rst').read() else:",
"ext_modules=[obextension], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment ::",
"try: return build_ext.swig_sources(self, sources, extension) except DistutilsExecError: print('\\nError: SWIG failed. Is Open Babel",
"'Environment :: Console', 'Environment :: Other Environment', 'Intended Audience :: Education', 'Intended Audience",
"'--modversion') if not StrictVersion(version) >= StrictVersion('2.3.0'): print('Warning: Open Babel 2.3.0 or later is",
"build_ext.finalize_options(self) include_dirs, library_dirs = locate_ob() self.include_dirs.append(include_dirs) self.library_dirs.append(library_dirs) self.swig_opts = ['-c++', '-small', '-O', '-templatereduce',",
"tool.\"\"\" try: p = subprocess.Popen(['pkg-config', option, package], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr =",
"(%s) may not be compatible.' % version) include_dirs = pkgconfig('openbabel-2.0', '--variable=pkgincludedir') library_dirs =",
"universal_newlines=True) stdout, stderr = p.communicate() if stderr: raise PkgConfigError('package %s could not be",
"setup, Extension __author__ = '<NAME>' __email__ = '<EMAIL>' __version__ = '2.4.0' __license__ =",
"chemistry, solid-state materials, biochemistry, or related areas. It provides a broad base of",
"to locate Open Babel, otherwise guess default location.\"\"\" try: version = pkgconfig('openbabel-2.0', '--modversion')",
"Open Babel location:' % e) include_dirs = '/usr/local/include/openbabel-2.0' library_dirs = '/usr/local/lib' return include_dirs,",
"- Production/Stable', 'Environment :: Console', 'Environment :: Other Environment', 'Intended Audience :: Education',",
"Windows', 'Operating System :: OS Independent', 'Operating System :: POSIX', 'Operating System ::",
"later is required. Your version (%s) may not be compatible.' % version) include_dirs",
"of chemical functionality for custom development. ''' class PkgConfigError(Exception): pass def pkgconfig(package, option):",
":: Unix', 'Programming Language :: C++', 'Programming Language :: Python', 'Topic :: Scientific/Engineering",
"package provides a Python wrapper to the Open Babel C++ chemistry library. Open",
":: POSIX', 'Operating System :: POSIX :: Linux', 'Operating System :: Unix', 'Programming",
"class CustomInstall(install): \"\"\"Ensure build_ext runs first in install command.\"\"\" def run(self): self.run_command('build_ext') install.run(self)",
"is a chemical toolbox designed to speak the many languages of chemical data.",
"def pkgconfig(package, option): \"\"\"Wrapper around pkg-config command line tool.\"\"\" try: p = subprocess.Popen(['pkg-config',",
"POSIX :: Linux', 'Operating System :: Unix', 'Programming Language :: C++', 'Programming Language",
":: C++', 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'Topic ::",
"print('Warning: Open Babel 2.3.0 or later is required. Your version (%s) may not",
"self.include_dirs] print('- include_dirs: %s\\n- library_dirs: %s' % (self.include_dirs, self.library_dirs)) def swig_sources(self, sources, extension):",
"The Open Babel package provides a Python wrapper to the Open Babel C++",
"return stdout.strip() except OSError: raise PkgConfigError('pkg-config could not be found') def locate_ob(): \"\"\"Try",
"PkgConfigError('pkg-config could not be found') def locate_ob(): \"\"\"Try use pkgconfig to locate Open",
"Babel include and library directories. ' 'For example:\\n' ' python setup.py build_ext -I/usr/local/include/openbabel-2.0",
"Open Babel 2.3.0 or later is required. Your version (%s) may not be",
"def finalize_options(self): # Setting include_dirs, library_dirs, swig_opts here instead of in Extension constructor",
"data from molecular modeling, chemistry, solid-state materials, biochemistry, or related areas. It provides",
"options and print a better error message.\"\"\" def finalize_options(self): # Setting include_dirs, library_dirs,",
"self.library_dirs.append(library_dirs) self.swig_opts = ['-c++', '-small', '-O', '-templatereduce', '-naturalvar'] self.swig_opts += ['-I%s' % i",
"obextension = Extension('_openbabel', ['openbabel-python.i'], libraries=['openbabel']) setup(name='openbabel', version=__version__, author=__author__, author_email=__email__, license=__license__, url='http://openbabel.org/', description='Python interface",
"Audience :: Science/Research', 'License :: OSI Approved :: GNU General Public License (GPL)',",
":: Python', 'Topic :: Scientific/Engineering :: Bio-Informatics', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic",
"OSError: raise PkgConfigError('pkg-config could not be found') def locate_ob(): \"\"\"Try use pkgconfig to",
"print('\\nError: SWIG failed. Is Open Babel installed?\\n' 'You may need to manually specify",
"found by pkg-config' % package) return stdout.strip() except OSError: raise PkgConfigError('pkg-config could not",
"allows them to be # overridden using -I and -L command line options"
] |
[
"# -*- coding: utf-8 -*- from __future__ import division import unittest import odelab",
"= 5e-9 V0 = .01 def test_run(self): sys = Differentiator(LinBumpSignal(self.V0,self.t0)) ## sys.kT =",
"Test_Differentiator(unittest.TestCase): t0 = 5e-9 V0 = .01 def test_run(self): sys = Differentiator(LinBumpSignal(self.V0,self.t0)) ##",
"= Differentiator(LinBumpSignal(self.V0,self.t0)) ## sys.kT = 0. # no noise scheme = EulerMaruyama() ##",
"= EulerMaruyama() ## scheme.h = 2.5e-11 scheme.h = self.t0 self.s = SingleStepSolver(scheme, sys)",
"self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0=np.array([1.])) self.s.run(time=1.) class Test_Differentiator(unittest.TestCase): t0 = 5e-9 V0 =",
"= 0. # no noise scheme = EulerMaruyama() ## scheme.h = 2.5e-11 scheme.h",
"from odelab.scheme.stochastic import * from odelab.system import * from odelab.solver import * import",
"V0 = .01 def test_run(self): sys = Differentiator(LinBumpSignal(self.V0,self.t0)) ## sys.kT = 0. #",
"as np class Test_OU(unittest.TestCase): def test_run(self): sys = OrnsteinUhlenbeck() scheme = EulerMaruyama() scheme.h",
"import unittest import odelab from odelab.scheme.stochastic import * from odelab.system import * from",
"no noise scheme = EulerMaruyama() ## scheme.h = 2.5e-11 scheme.h = self.t0 self.s",
"* from odelab.solver import * import numpy as np class Test_OU(unittest.TestCase): def test_run(self):",
"coding: utf-8 -*- from __future__ import division import unittest import odelab from odelab.scheme.stochastic",
"from __future__ import division import unittest import odelab from odelab.scheme.stochastic import * from",
"EulerMaruyama() scheme.h = .01 self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0=np.array([1.])) self.s.run(time=1.) class Test_Differentiator(unittest.TestCase): t0",
"import division import unittest import odelab from odelab.scheme.stochastic import * from odelab.system import",
"odelab.system import * from odelab.solver import * import numpy as np class Test_OU(unittest.TestCase):",
"import * import numpy as np class Test_OU(unittest.TestCase): def test_run(self): sys = OrnsteinUhlenbeck()",
"sys.kT = 0. # no noise scheme = EulerMaruyama() ## scheme.h = 2.5e-11",
"Test_OU(unittest.TestCase): def test_run(self): sys = OrnsteinUhlenbeck() scheme = EulerMaruyama() scheme.h = .01 self.s",
"= .01 def test_run(self): sys = Differentiator(LinBumpSignal(self.V0,self.t0)) ## sys.kT = 0. # no",
"-*- from __future__ import division import unittest import odelab from odelab.scheme.stochastic import *",
"utf-8 -*- from __future__ import division import unittest import odelab from odelab.scheme.stochastic import",
"OrnsteinUhlenbeck() scheme = EulerMaruyama() scheme.h = .01 self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0=np.array([1.])) self.s.run(time=1.)",
"class Test_Differentiator(unittest.TestCase): t0 = 5e-9 V0 = .01 def test_run(self): sys = Differentiator(LinBumpSignal(self.V0,self.t0))",
"scheme = EulerMaruyama() scheme.h = .01 self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0=np.array([1.])) self.s.run(time=1.) class",
"5e-9 V0 = .01 def test_run(self): sys = Differentiator(LinBumpSignal(self.V0,self.t0)) ## sys.kT = 0.",
"__future__ import division import unittest import odelab from odelab.scheme.stochastic import * from odelab.system",
"import * from odelab.solver import * import numpy as np class Test_OU(unittest.TestCase): def",
"sys = Differentiator(LinBumpSignal(self.V0,self.t0)) ## sys.kT = 0. # no noise scheme = EulerMaruyama()",
"Differentiator(LinBumpSignal(self.V0,self.t0)) ## sys.kT = 0. # no noise scheme = EulerMaruyama() ## scheme.h",
"unittest import odelab from odelab.scheme.stochastic import * from odelab.system import * from odelab.solver",
"scheme.h = 2.5e-11 scheme.h = self.t0 self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0 = np.array([0,0,0,0,0.]))",
"from odelab.solver import * import numpy as np class Test_OU(unittest.TestCase): def test_run(self): sys",
"class Test_OU(unittest.TestCase): def test_run(self): sys = OrnsteinUhlenbeck() scheme = EulerMaruyama() scheme.h = .01",
"SingleStepSolver(scheme, sys) self.s.initialize(u0=np.array([1.])) self.s.run(time=1.) class Test_Differentiator(unittest.TestCase): t0 = 5e-9 V0 = .01 def",
"## sys.kT = 0. # no noise scheme = EulerMaruyama() ## scheme.h =",
"scheme = EulerMaruyama() ## scheme.h = 2.5e-11 scheme.h = self.t0 self.s = SingleStepSolver(scheme,",
"* from odelab.system import * from odelab.solver import * import numpy as np",
"* import numpy as np class Test_OU(unittest.TestCase): def test_run(self): sys = OrnsteinUhlenbeck() scheme",
"= SingleStepSolver(scheme, sys) self.s.initialize(u0=np.array([1.])) self.s.run(time=1.) class Test_Differentiator(unittest.TestCase): t0 = 5e-9 V0 = .01",
"def test_run(self): sys = OrnsteinUhlenbeck() scheme = EulerMaruyama() scheme.h = .01 self.s =",
"## scheme.h = 2.5e-11 scheme.h = self.t0 self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0 =",
"from odelab.system import * from odelab.solver import * import numpy as np class",
"self.s.run(time=1.) class Test_Differentiator(unittest.TestCase): t0 = 5e-9 V0 = .01 def test_run(self): sys =",
"import odelab from odelab.scheme.stochastic import * from odelab.system import * from odelab.solver import",
"self.s.initialize(u0=np.array([1.])) self.s.run(time=1.) class Test_Differentiator(unittest.TestCase): t0 = 5e-9 V0 = .01 def test_run(self): sys",
"numpy as np class Test_OU(unittest.TestCase): def test_run(self): sys = OrnsteinUhlenbeck() scheme = EulerMaruyama()",
"sys) self.s.initialize(u0=np.array([1.])) self.s.run(time=1.) class Test_Differentiator(unittest.TestCase): t0 = 5e-9 V0 = .01 def test_run(self):",
"0. # no noise scheme = EulerMaruyama() ## scheme.h = 2.5e-11 scheme.h =",
"test_run(self): sys = OrnsteinUhlenbeck() scheme = EulerMaruyama() scheme.h = .01 self.s = SingleStepSolver(scheme,",
"= EulerMaruyama() scheme.h = .01 self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0=np.array([1.])) self.s.run(time=1.) class Test_Differentiator(unittest.TestCase):",
"= .01 self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0=np.array([1.])) self.s.run(time=1.) class Test_Differentiator(unittest.TestCase): t0 = 5e-9",
"test_run(self): sys = Differentiator(LinBumpSignal(self.V0,self.t0)) ## sys.kT = 0. # no noise scheme =",
"= OrnsteinUhlenbeck() scheme = EulerMaruyama() scheme.h = .01 self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0=np.array([1.]))",
"def test_run(self): sys = Differentiator(LinBumpSignal(self.V0,self.t0)) ## sys.kT = 0. # no noise scheme",
"EulerMaruyama() ## scheme.h = 2.5e-11 scheme.h = self.t0 self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0",
"import * from odelab.system import * from odelab.solver import * import numpy as",
"noise scheme = EulerMaruyama() ## scheme.h = 2.5e-11 scheme.h = self.t0 self.s =",
"# no noise scheme = EulerMaruyama() ## scheme.h = 2.5e-11 scheme.h = self.t0",
"division import unittest import odelab from odelab.scheme.stochastic import * from odelab.system import *",
"sys = OrnsteinUhlenbeck() scheme = EulerMaruyama() scheme.h = .01 self.s = SingleStepSolver(scheme, sys)",
"scheme.h = .01 self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0=np.array([1.])) self.s.run(time=1.) class Test_Differentiator(unittest.TestCase): t0 =",
"odelab.scheme.stochastic import * from odelab.system import * from odelab.solver import * import numpy",
".01 self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0=np.array([1.])) self.s.run(time=1.) class Test_Differentiator(unittest.TestCase): t0 = 5e-9 V0",
"t0 = 5e-9 V0 = .01 def test_run(self): sys = Differentiator(LinBumpSignal(self.V0,self.t0)) ## sys.kT",
"import numpy as np class Test_OU(unittest.TestCase): def test_run(self): sys = OrnsteinUhlenbeck() scheme =",
".01 def test_run(self): sys = Differentiator(LinBumpSignal(self.V0,self.t0)) ## sys.kT = 0. # no noise",
"odelab.solver import * import numpy as np class Test_OU(unittest.TestCase): def test_run(self): sys =",
"np class Test_OU(unittest.TestCase): def test_run(self): sys = OrnsteinUhlenbeck() scheme = EulerMaruyama() scheme.h =",
"= 2.5e-11 scheme.h = self.t0 self.s = SingleStepSolver(scheme, sys) self.s.initialize(u0 = np.array([0,0,0,0,0.])) self.s.run(time=5*self.t0)",
"odelab from odelab.scheme.stochastic import * from odelab.system import * from odelab.solver import *",
"-*- coding: utf-8 -*- from __future__ import division import unittest import odelab from"
] |
[
"await ctx.send(\"You need to register first! Do `$register`\") @pot.error async def pot_error(ctx, error):",
"commands.CommandNotFound): await ctx.send(\"That wasn't a valid command!\\nType `$help` to see a list of",
"your daily reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command()",
"Do `$register`\") @pot.error async def pot_error(ctx, error): await ctx.send('Make sure you have the",
"ctx.send('Make sure you have the recipient in the command: `$pot <recipient>`') @bot.command() async",
"a :honey_pot: to hasn't registered yet! Tell them to do `$register`\") return if",
"if str(ctx.message.author.id) in data: if data[str(ctx.message.author.id)][\"honey_potter\"]: if not str(recipient.id) in data: await ctx.send(\"The",
"= unicode_max_answers[:len(question[\"choices\"])] answer = question[\"choices\"].index(question[\"answer\"]) for i in range(len(question[\"choices\"])): message += trivia_answers[i] +",
"daily reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() @commands.cooldown(1,",
":honey_pot:! (worth 100 :candy:)\") else: await ctx.send(\"Hey, you don't have permission to do",
"discord_id = str(ctx.message.author.id) if discord_id not in data: await ctx.send(\"You need to register",
"+ question[\"choices\"][unicode_answers.index(str(reaction))] + \" was incorrect... The correct answer was \" + trivia_answers[answer]",
"bot = commands.Bot(command_prefix='$') with open(\"../data.json\") as data_file: data = json.load(data_file) @bot.event async def",
"need to register first! Do `$register`\") else: data[discord_id][\"points\"] += 10 await ctx.send(\"You got",
"have \" + str(data[discord_id][\"points\"]) + \" :candy:\") else: await ctx.send(\"You need to register",
"recipient in the command: `$pot <recipient>`') @bot.command() async def trivia(ctx): discord_id = str(ctx.message.author.id)",
"+ \" \" + question[\"choices\"][i] + \"\\n\" sent = await ctx.send(message) for c",
"reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() async def",
"on_ready(): print('Logged on as ' + str(bot.user)) @bot.event async def on_command_error(ctx, error): if",
"to hasn't registered yet! Tell them to do `$register`\") return if ctx.message.author.id ==",
"see a list of commands.\") else: raise error @bot.command() @commands.cooldown(1, 60 * 60",
"\" + str(data[discord_id][\"points\"]) + \" :candy:\") else: await ctx.send(\"You need to register first!",
"raise error @bot.command() @commands.cooldown(1, 60 * 60 * 24, commands.BucketType.user) async def daily(ctx):",
"+ str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() async def pot(ctx, recipient: discord.Member): if str(ctx.message.author.id)",
"`$register`\") else: data[discord_id][\"points\"] += 1 await ctx.send(\"You got 1 :candy:\") _save() @daily.error async",
"commands.BucketType.user) async def daily(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: await",
"timeout=60.0, check=check) except asyncio.TimeoutError: await ctx.send(\"Trivia question for <@\" + str(ctx.message.author.id) + \">",
"register first! Do `$register`\") else: data[discord_id][\"points\"] += 1 await ctx.send(\"You got 1 :candy:\")",
"\":regional_indicator_c:\", \":regional_indicator_d:\"] unicode_max_answers = ['🇦', '🇧', '🇨', '🇩'] bot = commands.Bot(command_prefix='$') with open(\"../data.json\")",
"+ \" :candy:\") else: await ctx.send(\"You need to register first! Do `$register`\") @bot.command()",
"open(\"../data.json\") as data_file: data = json.load(data_file) @bot.event async def on_ready(): print('Logged on as",
"[\":regional_indicator_a:\", \":regional_indicator_b:\", \":regional_indicator_c:\", \":regional_indicator_d:\"] unicode_max_answers = ['🇦', '🇧', '🇨', '🇩'] bot = commands.Bot(command_prefix='$')",
"first! Do `$register`\") return with open(\"../trivia/\" + random.choice(os.listdir(\"../trivia\")), encoding=\"utf-8\") as trivia_file: trivia_questions =",
"await ctx.send(\"You need to register first! Do `$register`\") else: data[discord_id][\"points\"] += 10 await",
"raise error @bot.command() @commands.cooldown(1, 60 * 60 * 24 * 30, commands.BucketType.user) async",
"+ str(bot.user)) @bot.event async def on_command_error(ctx, error): if isinstance(error, commands.CommandNotFound): await ctx.send(\"That wasn't",
"answer was \" + trivia_answers[answer] + \" \" + question[\"answer\"] + \". You",
"\"Question for <@\" + str(ctx.message.author.id) + \">:\\n\" + question[\"question\"] + \"\\n\\n\" unicode_answers =",
"\"\\n\\n\" unicode_answers = unicode_max_answers[:len(question[\"choices\"])] answer = question[\"choices\"].index(question[\"answer\"]) for i in range(len(question[\"choices\"])): message +=",
"need to register first! Do `$register`\") else: data[discord_id][\"points\"] += 1 await ctx.send(\"You got",
"+ str(recipient.id) + \"> a :honey_pot:! (worth 100 :candy:)\") else: await ctx.send(\"Hey, you",
"_save(): with open('../data.json', 'w+') as file_save: json.dump(data, file_save) with open('../secret.txt', 'r') as f:",
"error): await ctx.send('Make sure you have the recipient in the command: `$pot <recipient>`')",
"async def pot_error(ctx, error): await ctx.send('Make sure you have the recipient in the",
"answer = question[\"choices\"].index(question[\"answer\"]) for i in range(len(question[\"choices\"])): message += trivia_answers[i] + \" \"",
"import commands import json import math import datetime import os import random trivia_answers",
"{} data[discord_id][\"points\"] = 0 data[discord_id][\"honey_potter\"] = False await ctx.send(\"You've been registered!\") _save() daily.reset_cooldown(ctx)",
"return with open(\"../trivia/\" + random.choice(os.listdir(\"../trivia\")), encoding=\"utf-8\") as trivia_file: trivia_questions = json.load(trivia_file) question =",
"await ctx.send(\"You need to register first! Do `$register`\") return with open(\"../trivia/\" + random.choice(os.listdir(\"../trivia\")),",
"\" + question[\"answer\"] + \". You lost 1 :candy:\") _save() @bot.command() async def",
"i in range(len(question[\"choices\"])): message += trivia_answers[i] + \" \" + question[\"choices\"][i] + \"\\n\"",
"question for <@\" + str(ctx.message.author.id) + \"> timed out.\") return if unicode_answers.index(str(reaction)) ==",
"str(ctx.message.author.id) + \">:\\n\" + question[\"question\"] + \"\\n\\n\" unicode_answers = unicode_max_answers[:len(question[\"choices\"])] answer = question[\"choices\"].index(question[\"answer\"])",
"ctx.send(\"You got 1 :chocolate_bar: (equivalent to 10 :candy:)\") _save() @monthly.error async def monthly_error(ctx,",
"-= 1 await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" + str(reaction) + \"",
"to register first! Do `$register`\") @pot.error async def pot_error(ctx, error): await ctx.send('Make sure",
"error @bot.command() async def pot(ctx, recipient: discord.Member): if str(ctx.message.author.id) in data: if data[str(ctx.message.author.id)][\"honey_potter\"]:",
"if data[str(ctx.message.author.id)][\"honey_potter\"]: if not str(recipient.id) in data: await ctx.send(\"The person you're giving a",
"+= 1 await ctx.send(\"You got 1 :candy:\") _save() @daily.error async def daily_error(ctx, error):",
"random.choice(os.listdir(\"../trivia\")), encoding=\"utf-8\") as trivia_file: trivia_questions = json.load(trivia_file) question = trivia_questions[random.randint(0, len(trivia_questions) - 1)]",
"unicode_max_answers[:len(question[\"choices\"])] answer = question[\"choices\"].index(question[\"answer\"]) for i in range(len(question[\"choices\"])): message += trivia_answers[i] + \"",
"isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your monthly reward! Try again in '",
"async def on_command_error(ctx, error): if isinstance(error, commands.CommandNotFound): await ctx.send(\"That wasn't a valid command!\\nType",
"* 24, commands.BucketType.user) async def daily(ctx): discord_id = str(ctx.message.author.id) if discord_id not in",
"trivia_answers[i] + \" \" + question[\"choices\"][i] + \"\\n\" sent = await ctx.send(message) for",
"= json.load(data_file) @bot.event async def on_ready(): print('Logged on as ' + str(bot.user)) @bot.event",
"message = \"Question for <@\" + str(ctx.message.author.id) + \">:\\n\" + question[\"question\"] + \"\\n\\n\"",
"commands.\") else: raise error @bot.command() @commands.cooldown(1, 60 * 60 * 24, commands.BucketType.user) async",
"question[\"choices\"][i] + \"\\n\" sent = await ctx.send(message) for c in unicode_answers: await sent.add_reaction(c)",
"can't pot yourself... :unamused:\") return data[str(recipient.id)][\"points\"] += 100 _save() await ctx.send(\"Wow! <@\" +",
"await ctx.send(message) for c in unicode_answers: await sent.add_reaction(c) print(trivia_answers[answer] + \" \" +",
"@commands.cooldown(1, 60 * 60 * 24 * 30, commands.BucketType.user) async def monthly(ctx): discord_id",
":honey_pot: to hasn't registered yet! Tell them to do `$register`\") return if ctx.message.author.id",
"else: data[discord_id][\"points\"] += 10 await ctx.send(\"You got 1 :chocolate_bar: (equivalent to 10 :candy:)\")",
"30, commands.BucketType.user) async def monthly(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data:",
"ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" + str(reaction) + \" \" + question[\"choices\"][unicode_answers.index(str(reaction))]",
"`$register`\") return with open(\"../trivia/\" + random.choice(os.listdir(\"../trivia\")), encoding=\"utf-8\") as trivia_file: trivia_questions = json.load(trivia_file) question",
"datetime import os import random trivia_answers = [\":regional_indicator_a:\", \":regional_indicator_b:\", \":regional_indicator_c:\", \":regional_indicator_d:\"] unicode_max_answers =",
"unicode_answers: await sent.add_reaction(c) print(trivia_answers[answer] + \" \" + question[\"answer\"]) def check(reaction_arg, user_arg): return",
"return if unicode_answers.index(str(reaction)) == answer: data[discord_id][\"points\"] += 5 await ctx.send(\"<@\" + str(ctx.message.author.id) +",
"<recipient>`') @bot.command() async def trivia(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data:",
"+ \"> \" + trivia_answers[answer] + \" \" + question[\"answer\"] + \" was",
"register(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: data[discord_id] = {} data[discord_id][\"points\"]",
"\"> \" + str(reaction) + \" \" + question[\"choices\"][unicode_answers.index(str(reaction))] + \" was incorrect...",
"'🇩'] bot = commands.Bot(command_prefix='$') with open(\"../data.json\") as data_file: data = json.load(data_file) @bot.event async",
"\" + trivia_answers[answer] + \" \" + question[\"answer\"] + \" was correct! You",
"print('Logged on as ' + str(bot.user)) @bot.event async def on_command_error(ctx, error): if isinstance(error,",
"to register first! Do `$register`\") else: data[discord_id][\"points\"] += 1 await ctx.send(\"You got 1",
"+ \". You lost 1 :candy:\") _save() @bot.command() async def bal(ctx): discord_id =",
"commands.CommandOnCooldown): await ctx.send('You already claimed your daily reward! Try again in ' +",
"await ctx.send(\"You got 1 :chocolate_bar: (equivalent to 10 :candy:)\") _save() @monthly.error async def",
"\":regional_indicator_d:\"] unicode_max_answers = ['🇦', '🇧', '🇨', '🇩'] bot = commands.Bot(command_prefix='$') with open(\"../data.json\") as",
"check=check) except asyncio.TimeoutError: await ctx.send(\"Trivia question for <@\" + str(ctx.message.author.id) + \"> timed",
"else: data[discord_id][\"points\"] -= 1 await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" + str(reaction)",
"ctx.send(\"Wow! <@\" + str(ctx.message.author.id) + \"> just gave <@\" + str(recipient.id) + \">",
"data_file: data = json.load(data_file) @bot.event async def on_ready(): print('Logged on as ' +",
"claimed your monthly reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error",
":chocolate_bar: (equivalent to 10 :candy:)\") _save() @monthly.error async def monthly_error(ctx, error): if isinstance(error,",
"await ctx.send('You already claimed your monthly reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after))))",
"in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() async def pot(ctx, recipient: discord.Member):",
"The correct answer was \" + trivia_answers[answer] + \" \" + question[\"answer\"] +",
"ctx.send(\"You can't pot yourself... :unamused:\") return data[str(recipient.id)][\"points\"] += 100 _save() await ctx.send(\"Wow! <@\"",
"discord.Member): if str(ctx.message.author.id) in data: if data[str(ctx.message.author.id)][\"honey_potter\"]: if not str(recipient.id) in data: await",
"question[\"answer\"] + \". You lost 1 :candy:\") _save() @bot.command() async def bal(ctx): discord_id",
"ctx.send(\"You need to register first! Do `$register`\") @pot.error async def pot_error(ctx, error): await",
"ctx.send(\"You need to register first! Do `$register`\") else: data[discord_id][\"points\"] += 1 await ctx.send(\"You",
"Tell them to do `$register`\") return if ctx.message.author.id == recipient.id: await ctx.send(\"You can't",
"data: await ctx.send(\"You need to register first! Do `$register`\") else: data[discord_id][\"points\"] += 10",
"return data[str(recipient.id)][\"points\"] += 100 _save() await ctx.send(\"Wow! <@\" + str(ctx.message.author.id) + \"> just",
"been registered!\") _save() daily.reset_cooldown(ctx) monthly.reset_cooldown(ctx) else: await ctx.send(\"You are already registered!\") def _save():",
"60 * 60 * 24, commands.BucketType.user) async def daily(ctx): discord_id = str(ctx.message.author.id) if",
"@bot.command() async def register(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: data[discord_id]",
"1)] message = \"Question for <@\" + str(ctx.message.author.id) + \">:\\n\" + question[\"question\"] +",
"import discord from discord.ext import commands import json import math import datetime import",
":candy:)\") else: await ctx.send(\"Hey, you don't have permission to do that!\") else: await",
"as trivia_file: trivia_questions = json.load(trivia_file) question = trivia_questions[random.randint(0, len(trivia_questions) - 1)] message =",
"+= 100 _save() await ctx.send(\"Wow! <@\" + str(ctx.message.author.id) + \"> just gave <@\"",
"def monthly(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: await ctx.send(\"You need",
"the recipient in the command: `$pot <recipient>`') @bot.command() async def trivia(ctx): discord_id =",
"else: await ctx.send(\"You need to register first! Do `$register`\") @pot.error async def pot_error(ctx,",
"yourself... :unamused:\") return data[str(recipient.id)][\"points\"] += 100 _save() await ctx.send(\"Wow! <@\" + str(ctx.message.author.id) +",
"_save() @daily.error async def daily_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed",
"json import math import datetime import os import random trivia_answers = [\":regional_indicator_a:\", \":regional_indicator_b:\",",
"`$register`\") return if ctx.message.author.id == recipient.id: await ctx.send(\"You can't pot yourself... :unamused:\") return",
"in data: if data[str(ctx.message.author.id)][\"honey_potter\"]: if not str(recipient.id) in data: await ctx.send(\"The person you're",
"== ctx.message.author try: reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check) except asyncio.TimeoutError: await",
"if ctx.message.author.id == recipient.id: await ctx.send(\"You can't pot yourself... :unamused:\") return data[str(recipient.id)][\"points\"] +=",
"str(ctx.message.author.id) + \"> just gave <@\" + str(recipient.id) + \"> a :honey_pot:! (worth",
"in unicode_answers: await sent.add_reaction(c) print(trivia_answers[answer] + \" \" + question[\"answer\"]) def check(reaction_arg, user_arg):",
"data: await ctx.send(\"You have \" + str(data[discord_id][\"points\"]) + \" :candy:\") else: await ctx.send(\"You",
"<@\" + str(ctx.message.author.id) + \">:\\n\" + question[\"question\"] + \"\\n\\n\" unicode_answers = unicode_max_answers[:len(question[\"choices\"])] answer",
"json.load(trivia_file) question = trivia_questions[random.randint(0, len(trivia_questions) - 1)] message = \"Question for <@\" +",
"False await ctx.send(\"You've been registered!\") _save() daily.reset_cooldown(ctx) monthly.reset_cooldown(ctx) else: await ctx.send(\"You are already",
"a list of commands.\") else: raise error @bot.command() @commands.cooldown(1, 60 * 60 *",
"do `$register`\") return if ctx.message.author.id == recipient.id: await ctx.send(\"You can't pot yourself... :unamused:\")",
"\" was incorrect... The correct answer was \" + trivia_answers[answer] + \" \"",
"a valid command!\\nType `$help` to see a list of commands.\") else: raise error",
"discord_id not in data: data[discord_id] = {} data[discord_id][\"points\"] = 0 data[discord_id][\"honey_potter\"] = False",
"+ \" \" + question[\"answer\"]) def check(reaction_arg, user_arg): return user_arg == ctx.message.author try:",
"question[\"answer\"] + \" was correct! You received 1 :lollipop: (equivalent to 5 :candy:)\")",
"sent = await ctx.send(message) for c in unicode_answers: await sent.add_reaction(c) print(trivia_answers[answer] + \"",
"ctx.send(message) for c in unicode_answers: await sent.add_reaction(c) print(trivia_answers[answer] + \" \" + question[\"answer\"])",
"+ \">:\\n\" + question[\"question\"] + \"\\n\\n\" unicode_answers = unicode_max_answers[:len(question[\"choices\"])] answer = question[\"choices\"].index(question[\"answer\"]) for",
"_save() @monthly.error async def monthly_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed",
"first! Do `$register`\") else: data[discord_id][\"points\"] += 1 await ctx.send(\"You got 1 :candy:\") _save()",
"str(bot.user)) @bot.event async def on_command_error(ctx, error): if isinstance(error, commands.CommandNotFound): await ctx.send(\"That wasn't a",
"pot(ctx, recipient: discord.Member): if str(ctx.message.author.id) in data: if data[str(ctx.message.author.id)][\"honey_potter\"]: if not str(recipient.id) in",
"@pot.error async def pot_error(ctx, error): await ctx.send('Make sure you have the recipient in",
"\" + question[\"answer\"]) def check(reaction_arg, user_arg): return user_arg == ctx.message.author try: reaction, user",
"+ \" was incorrect... The correct answer was \" + trivia_answers[answer] + \"",
"as data_file: data = json.load(data_file) @bot.event async def on_ready(): print('Logged on as '",
"def register(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: data[discord_id] = {}",
"if discord_id not in data: await ctx.send(\"You need to register first! Do `$register`\")",
"giving a :honey_pot: to hasn't registered yet! Tell them to do `$register`\") return",
"1 await ctx.send(\"You got 1 :candy:\") _save() @daily.error async def daily_error(ctx, error): if",
"commands import json import math import datetime import os import random trivia_answers =",
"discord_id not in data: await ctx.send(\"You need to register first! Do `$register`\") else:",
"return if ctx.message.author.id == recipient.id: await ctx.send(\"You can't pot yourself... :unamused:\") return data[str(recipient.id)][\"points\"]",
"* 60 * 24 * 30, commands.BucketType.user) async def monthly(ctx): discord_id = str(ctx.message.author.id)",
"daily(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: await ctx.send(\"You need to",
"need to register first! Do `$register`\") return with open(\"../trivia/\" + random.choice(os.listdir(\"../trivia\")), encoding=\"utf-8\") as",
"timed out.\") return if unicode_answers.index(str(reaction)) == answer: data[discord_id][\"points\"] += 5 await ctx.send(\"<@\" +",
"+ str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() @commands.cooldown(1, 60 * 60 * 24 *",
"str(ctx.message.author.id) + \"> \" + trivia_answers[answer] + \" \" + question[\"answer\"] + \"",
"<@\" + str(recipient.id) + \"> a :honey_pot:! (worth 100 :candy:)\") else: await ctx.send(\"Hey,",
"register first! Do `$register`\") @pot.error async def pot_error(ctx, error): await ctx.send('Make sure you",
"await ctx.send(\"You got 1 :candy:\") _save() @daily.error async def daily_error(ctx, error): if isinstance(error,",
"for i in range(len(question[\"choices\"])): message += trivia_answers[i] + \" \" + question[\"choices\"][i] +",
"already claimed your daily reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise",
"daily_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your daily reward! Try",
"You received 1 :lollipop: (equivalent to 5 :candy:)\") _save() else: data[discord_id][\"points\"] -= 1",
"@bot.command() @commands.cooldown(1, 60 * 60 * 24 * 30, commands.BucketType.user) async def monthly(ctx):",
"correct! You received 1 :lollipop: (equivalent to 5 :candy:)\") _save() else: data[discord_id][\"points\"] -=",
"isinstance(error, commands.CommandNotFound): await ctx.send(\"That wasn't a valid command!\\nType `$help` to see a list",
"in data: await ctx.send(\"The person you're giving a :honey_pot: to hasn't registered yet!",
"= question[\"choices\"].index(question[\"answer\"]) for i in range(len(question[\"choices\"])): message += trivia_answers[i] + \" \" +",
"\"> just gave <@\" + str(recipient.id) + \"> a :honey_pot:! (worth 100 :candy:)\")",
"1 await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" + str(reaction) + \" \"",
"valid command!\\nType `$help` to see a list of commands.\") else: raise error @bot.command()",
"= str(ctx.message.author.id) if discord_id not in data: await ctx.send(\"You need to register first!",
"60 * 24, commands.BucketType.user) async def daily(ctx): discord_id = str(ctx.message.author.id) if discord_id not",
"await ctx.send(\"You can't pot yourself... :unamused:\") return data[str(recipient.id)][\"points\"] += 100 _save() await ctx.send(\"Wow!",
"str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() async def pot(ctx, recipient: discord.Member): if str(ctx.message.author.id) in",
"just gave <@\" + str(recipient.id) + \"> a :honey_pot:! (worth 100 :candy:)\") else:",
"sent.add_reaction(c) print(trivia_answers[answer] + \" \" + question[\"answer\"]) def check(reaction_arg, user_arg): return user_arg ==",
"on as ' + str(bot.user)) @bot.event async def on_command_error(ctx, error): if isinstance(error, commands.CommandNotFound):",
"+ \"\\n\" sent = await ctx.send(message) for c in unicode_answers: await sent.add_reaction(c) print(trivia_answers[answer]",
"received 1 :lollipop: (equivalent to 5 :candy:)\") _save() else: data[discord_id][\"points\"] -= 1 await",
"Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() async def pot(ctx,",
"= str(ctx.message.author.id) if discord_id in data: await ctx.send(\"You have \" + str(data[discord_id][\"points\"]) +",
"your monthly reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command()",
"+ str(ctx.message.author.id) + \"> \" + str(reaction) + \" \" + question[\"choices\"][unicode_answers.index(str(reaction))] +",
"ctx.send(\"Trivia question for <@\" + str(ctx.message.author.id) + \"> timed out.\") return if unicode_answers.index(str(reaction))",
"(equivalent to 5 :candy:)\") _save() else: data[discord_id][\"points\"] -= 1 await ctx.send(\"<@\" + str(ctx.message.author.id)",
"trivia_questions[random.randint(0, len(trivia_questions) - 1)] message = \"Question for <@\" + str(ctx.message.author.id) + \">:\\n\"",
"= trivia_questions[random.randint(0, len(trivia_questions) - 1)] message = \"Question for <@\" + str(ctx.message.author.id) +",
"register first! Do `$register`\") @bot.command() async def register(ctx): discord_id = str(ctx.message.author.id) if discord_id",
"register first! Do `$register`\") return with open(\"../trivia/\" + random.choice(os.listdir(\"../trivia\")), encoding=\"utf-8\") as trivia_file: trivia_questions",
"'🇧', '🇨', '🇩'] bot = commands.Bot(command_prefix='$') with open(\"../data.json\") as data_file: data = json.load(data_file)",
"else: raise error @bot.command() @commands.cooldown(1, 60 * 60 * 24, commands.BucketType.user) async def",
"def check(reaction_arg, user_arg): return user_arg == ctx.message.author try: reaction, user = await bot.wait_for('reaction_add',",
"pot_error(ctx, error): await ctx.send('Make sure you have the recipient in the command: `$pot",
"\" \" + question[\"answer\"]) def check(reaction_arg, user_arg): return user_arg == ctx.message.author try: reaction,",
"== recipient.id: await ctx.send(\"You can't pot yourself... :unamused:\") return data[str(recipient.id)][\"points\"] += 100 _save()",
"trivia_answers[answer] + \" \" + question[\"answer\"] + \". You lost 1 :candy:\") _save()",
"\" + trivia_answers[answer] + \" \" + question[\"answer\"] + \". You lost 1",
"await ctx.send(\"Hey, you don't have permission to do that!\") else: await ctx.send(\"You need",
"24 * 30, commands.BucketType.user) async def monthly(ctx): discord_id = str(ctx.message.author.id) if discord_id not",
"user_arg): return user_arg == ctx.message.author try: reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check)",
"+ question[\"answer\"]) def check(reaction_arg, user_arg): return user_arg == ctx.message.author try: reaction, user =",
"discord from discord.ext import commands import json import math import datetime import os",
"def bal(ctx): discord_id = str(ctx.message.author.id) if discord_id in data: await ctx.send(\"You have \"",
"first! Do `$register`\") else: data[discord_id][\"points\"] += 10 await ctx.send(\"You got 1 :chocolate_bar: (equivalent",
"(worth 100 :candy:)\") else: await ctx.send(\"Hey, you don't have permission to do that!\")",
"trivia(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: await ctx.send(\"You need to",
"try: reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check) except asyncio.TimeoutError: await ctx.send(\"Trivia question",
"raise error @bot.command() async def pot(ctx, recipient: discord.Member): if str(ctx.message.author.id) in data: if",
"str(ctx.message.author.id) if discord_id in data: await ctx.send(\"You have \" + str(data[discord_id][\"points\"]) + \"",
"' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() @commands.cooldown(1, 60 * 60 * 24",
"question[\"choices\"][unicode_answers.index(str(reaction))] + \" was incorrect... The correct answer was \" + trivia_answers[answer] +",
"was incorrect... The correct answer was \" + trivia_answers[answer] + \" \" +",
"+= 5 await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" + trivia_answers[answer] + \"",
"question = trivia_questions[random.randint(0, len(trivia_questions) - 1)] message = \"Question for <@\" + str(ctx.message.author.id)",
"else: data[discord_id][\"points\"] += 1 await ctx.send(\"You got 1 :candy:\") _save() @daily.error async def",
"+ str(ctx.message.author.id) + \">:\\n\" + question[\"question\"] + \"\\n\\n\" unicode_answers = unicode_max_answers[:len(question[\"choices\"])] answer =",
"import random trivia_answers = [\":regional_indicator_a:\", \":regional_indicator_b:\", \":regional_indicator_c:\", \":regional_indicator_d:\"] unicode_max_answers = ['🇦', '🇧', '🇨',",
"else: raise error @bot.command() async def pot(ctx, recipient: discord.Member): if str(ctx.message.author.id) in data:",
"+= 10 await ctx.send(\"You got 1 :chocolate_bar: (equivalent to 10 :candy:)\") _save() @monthly.error",
"got 1 :chocolate_bar: (equivalent to 10 :candy:)\") _save() @monthly.error async def monthly_error(ctx, error):",
"monthly(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: await ctx.send(\"You need to",
"ctx.send(\"You need to register first! Do `$register`\") return with open(\"../trivia/\" + random.choice(os.listdir(\"../trivia\")), encoding=\"utf-8\")",
"lost 1 :candy:\") _save() @bot.command() async def bal(ctx): discord_id = str(ctx.message.author.id) if discord_id",
"json.load(data_file) @bot.event async def on_ready(): print('Logged on as ' + str(bot.user)) @bot.event async",
"ctx.send(\"You are already registered!\") def _save(): with open('../data.json', 'w+') as file_save: json.dump(data, file_save)",
"= await ctx.send(message) for c in unicode_answers: await sent.add_reaction(c) print(trivia_answers[answer] + \" \"",
"import asyncio import discord from discord.ext import commands import json import math import",
"= False await ctx.send(\"You've been registered!\") _save() daily.reset_cooldown(ctx) monthly.reset_cooldown(ctx) else: await ctx.send(\"You are",
"reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check) except asyncio.TimeoutError: await ctx.send(\"Trivia question for",
"commands.Bot(command_prefix='$') with open(\"../data.json\") as data_file: data = json.load(data_file) @bot.event async def on_ready(): print('Logged",
"with open(\"../data.json\") as data_file: data = json.load(data_file) @bot.event async def on_ready(): print('Logged on",
"await ctx.send(\"You've been registered!\") _save() daily.reset_cooldown(ctx) monthly.reset_cooldown(ctx) else: await ctx.send(\"You are already registered!\")",
"else: await ctx.send(\"You are already registered!\") def _save(): with open('../data.json', 'w+') as file_save:",
"ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" + trivia_answers[answer] + \" \" + question[\"answer\"]",
"ctx.send(\"You need to register first! Do `$register`\") else: data[discord_id][\"points\"] += 10 await ctx.send(\"You",
"<reponame>nicholasz2510/Polybius<filename>src/polybius.py import asyncio import discord from discord.ext import commands import json import math",
"_save() @bot.command() async def bal(ctx): discord_id = str(ctx.message.author.id) if discord_id in data: await",
"5 await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" + trivia_answers[answer] + \" \"",
"discord_id = str(ctx.message.author.id) if discord_id in data: await ctx.send(\"You have \" + str(data[discord_id][\"points\"])",
"\" \" + question[\"answer\"] + \". You lost 1 :candy:\") _save() @bot.command() async",
"need to register first! Do `$register`\") @pot.error async def pot_error(ctx, error): await ctx.send('Make",
"== answer: data[discord_id][\"points\"] += 5 await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" +",
"+ question[\"choices\"][i] + \"\\n\" sent = await ctx.send(message) for c in unicode_answers: await",
"\":regional_indicator_b:\", \":regional_indicator_c:\", \":regional_indicator_d:\"] unicode_max_answers = ['🇦', '🇧', '🇨', '🇩'] bot = commands.Bot(command_prefix='$') with",
"await ctx.send(\"The person you're giving a :honey_pot: to hasn't registered yet! Tell them",
"with open(\"../trivia/\" + random.choice(os.listdir(\"../trivia\")), encoding=\"utf-8\") as trivia_file: trivia_questions = json.load(trivia_file) question = trivia_questions[random.randint(0,",
"+ \"> a :honey_pot:! (worth 100 :candy:)\") else: await ctx.send(\"Hey, you don't have",
"discord_id not in data: await ctx.send(\"You need to register first! Do `$register`\") return",
"was \" + trivia_answers[answer] + \" \" + question[\"answer\"] + \". You lost",
"to 5 :candy:)\") _save() else: data[discord_id][\"points\"] -= 1 await ctx.send(\"<@\" + str(ctx.message.author.id) +",
"bal(ctx): discord_id = str(ctx.message.author.id) if discord_id in data: await ctx.send(\"You have \" +",
"= {} data[discord_id][\"points\"] = 0 data[discord_id][\"honey_potter\"] = False await ctx.send(\"You've been registered!\") _save()",
"1 :candy:\") _save() @bot.command() async def bal(ctx): discord_id = str(ctx.message.author.id) if discord_id in",
"async def pot(ctx, recipient: discord.Member): if str(ctx.message.author.id) in data: if data[str(ctx.message.author.id)][\"honey_potter\"]: if not",
"gave <@\" + str(recipient.id) + \"> a :honey_pot:! (worth 100 :candy:)\") else: await",
"error @bot.command() @commands.cooldown(1, 60 * 60 * 24 * 30, commands.BucketType.user) async def",
"to 10 :candy:)\") _save() @monthly.error async def monthly_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await",
"= commands.Bot(command_prefix='$') with open(\"../data.json\") as data_file: data = json.load(data_file) @bot.event async def on_ready():",
"def pot(ctx, recipient: discord.Member): if str(ctx.message.author.id) in data: if data[str(ctx.message.author.id)][\"honey_potter\"]: if not str(recipient.id)",
"error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your daily reward! Try again",
"already registered!\") def _save(): with open('../data.json', 'w+') as file_save: json.dump(data, file_save) with open('../secret.txt',",
"async def bal(ctx): discord_id = str(ctx.message.author.id) if discord_id in data: await ctx.send(\"You have",
"+ random.choice(os.listdir(\"../trivia\")), encoding=\"utf-8\") as trivia_file: trivia_questions = json.load(trivia_file) question = trivia_questions[random.randint(0, len(trivia_questions) -",
"else: await ctx.send(\"You need to register first! Do `$register`\") @bot.command() async def register(ctx):",
"if discord_id not in data: data[discord_id] = {} data[discord_id][\"points\"] = 0 data[discord_id][\"honey_potter\"] =",
"data[discord_id][\"points\"] = 0 data[discord_id][\"honey_potter\"] = False await ctx.send(\"You've been registered!\") _save() daily.reset_cooldown(ctx) monthly.reset_cooldown(ctx)",
"a :honey_pot:! (worth 100 :candy:)\") else: await ctx.send(\"Hey, you don't have permission to",
"@bot.event async def on_ready(): print('Logged on as ' + str(bot.user)) @bot.event async def",
"Do `$register`\") else: data[discord_id][\"points\"] += 1 await ctx.send(\"You got 1 :candy:\") _save() @daily.error",
"do that!\") else: await ctx.send(\"You need to register first! Do `$register`\") @pot.error async",
"else: await ctx.send(\"Hey, you don't have permission to do that!\") else: await ctx.send(\"You",
"+ \" \" + question[\"choices\"][unicode_answers.index(str(reaction))] + \" was incorrect... The correct answer was",
"import json import math import datetime import os import random trivia_answers = [\":regional_indicator_a:\",",
"not in data: await ctx.send(\"You need to register first! Do `$register`\") else: data[discord_id][\"points\"]",
"command: `$pot <recipient>`') @bot.command() async def trivia(ctx): discord_id = str(ctx.message.author.id) if discord_id not",
"\"\\n\" sent = await ctx.send(message) for c in unicode_answers: await sent.add_reaction(c) print(trivia_answers[answer] +",
"if isinstance(error, commands.CommandNotFound): await ctx.send(\"That wasn't a valid command!\\nType `$help` to see a",
"in data: await ctx.send(\"You need to register first! Do `$register`\") else: data[discord_id][\"points\"] +=",
"str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() @commands.cooldown(1, 60 * 60 * 24 * 30,",
"out.\") return if unicode_answers.index(str(reaction)) == answer: data[discord_id][\"points\"] += 5 await ctx.send(\"<@\" + str(ctx.message.author.id)",
"pot yourself... :unamused:\") return data[str(recipient.id)][\"points\"] += 100 _save() await ctx.send(\"Wow! <@\" + str(ctx.message.author.id)",
"wasn't a valid command!\\nType `$help` to see a list of commands.\") else: raise",
"if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your monthly reward! Try again in",
"permission to do that!\") else: await ctx.send(\"You need to register first! Do `$register`\")",
"= \"Question for <@\" + str(ctx.message.author.id) + \">:\\n\" + question[\"question\"] + \"\\n\\n\" unicode_answers",
"+ trivia_answers[answer] + \" \" + question[\"answer\"] + \". You lost 1 :candy:\")",
"10 :candy:)\") _save() @monthly.error async def monthly_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You",
":candy:\") _save() @bot.command() async def bal(ctx): discord_id = str(ctx.message.author.id) if discord_id in data:",
"again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() async def pot(ctx, recipient:",
"have permission to do that!\") else: await ctx.send(\"You need to register first! Do",
"if unicode_answers.index(str(reaction)) == answer: data[discord_id][\"points\"] += 5 await ctx.send(\"<@\" + str(ctx.message.author.id) + \">",
"person you're giving a :honey_pot: to hasn't registered yet! Tell them to do",
"async def daily(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: await ctx.send(\"You",
"await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" + str(reaction) + \" \" +",
"ctx.send('You already claimed your monthly reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else:",
"unicode_answers.index(str(reaction)) == answer: data[discord_id][\"points\"] += 5 await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \"",
"60 * 24 * 30, commands.BucketType.user) async def monthly(ctx): discord_id = str(ctx.message.author.id) if",
"unicode_answers = unicode_max_answers[:len(question[\"choices\"])] answer = question[\"choices\"].index(question[\"answer\"]) for i in range(len(question[\"choices\"])): message += trivia_answers[i]",
"`$register`\") @pot.error async def pot_error(ctx, error): await ctx.send('Make sure you have the recipient",
"in the command: `$pot <recipient>`') @bot.command() async def trivia(ctx): discord_id = str(ctx.message.author.id) if",
"Do `$register`\") @bot.command() async def register(ctx): discord_id = str(ctx.message.author.id) if discord_id not in",
"data: if data[str(ctx.message.author.id)][\"honey_potter\"]: if not str(recipient.id) in data: await ctx.send(\"The person you're giving",
"data: data[discord_id] = {} data[discord_id][\"points\"] = 0 data[discord_id][\"honey_potter\"] = False await ctx.send(\"You've been",
"await sent.add_reaction(c) print(trivia_answers[answer] + \" \" + question[\"answer\"]) def check(reaction_arg, user_arg): return user_arg",
"c in unicode_answers: await sent.add_reaction(c) print(trivia_answers[answer] + \" \" + question[\"answer\"]) def check(reaction_arg,",
"def daily_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your daily reward!",
"\" + question[\"choices\"][unicode_answers.index(str(reaction))] + \" was incorrect... The correct answer was \" +",
"registered yet! Tell them to do `$register`\") return if ctx.message.author.id == recipient.id: await",
"message += trivia_answers[i] + \" \" + question[\"choices\"][i] + \"\\n\" sent = await",
"from discord.ext import commands import json import math import datetime import os import",
":candy:\") _save() @daily.error async def daily_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already",
"unicode_max_answers = ['🇦', '🇧', '🇨', '🇩'] bot = commands.Bot(command_prefix='$') with open(\"../data.json\") as data_file:",
"str(ctx.message.author.id) in data: if data[str(ctx.message.author.id)][\"honey_potter\"]: if not str(recipient.id) in data: await ctx.send(\"The person",
"+ \" was correct! You received 1 :lollipop: (equivalent to 5 :candy:)\") _save()",
"= [\":regional_indicator_a:\", \":regional_indicator_b:\", \":regional_indicator_c:\", \":regional_indicator_d:\"] unicode_max_answers = ['🇦', '🇧', '🇨', '🇩'] bot =",
"async def register(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: data[discord_id] =",
"\" \" + question[\"answer\"] + \" was correct! You received 1 :lollipop: (equivalent",
"if not str(recipient.id) in data: await ctx.send(\"The person you're giving a :honey_pot: to",
"encoding=\"utf-8\") as trivia_file: trivia_questions = json.load(trivia_file) question = trivia_questions[random.randint(0, len(trivia_questions) - 1)] message",
"list of commands.\") else: raise error @bot.command() @commands.cooldown(1, 60 * 60 * 24,",
"hasn't registered yet! Tell them to do `$register`\") return if ctx.message.author.id == recipient.id:",
"@commands.cooldown(1, 60 * 60 * 24, commands.BucketType.user) async def daily(ctx): discord_id = str(ctx.message.author.id)",
"`$help` to see a list of commands.\") else: raise error @bot.command() @commands.cooldown(1, 60",
"+ \" \" + question[\"answer\"] + \" was correct! You received 1 :lollipop:",
"@bot.command() async def bal(ctx): discord_id = str(ctx.message.author.id) if discord_id in data: await ctx.send(\"You",
"5 :candy:)\") _save() else: data[discord_id][\"points\"] -= 1 await ctx.send(\"<@\" + str(ctx.message.author.id) + \">",
"await ctx.send(\"You need to register first! Do `$register`\") else: data[discord_id][\"points\"] += 1 await",
"'🇨', '🇩'] bot = commands.Bot(command_prefix='$') with open(\"../data.json\") as data_file: data = json.load(data_file) @bot.event",
"\"> a :honey_pot:! (worth 100 :candy:)\") else: await ctx.send(\"Hey, you don't have permission",
"1 :lollipop: (equivalent to 5 :candy:)\") _save() else: data[discord_id][\"points\"] -= 1 await ctx.send(\"<@\"",
"import datetime import os import random trivia_answers = [\":regional_indicator_a:\", \":regional_indicator_b:\", \":regional_indicator_c:\", \":regional_indicator_d:\"] unicode_max_answers",
"recipient: discord.Member): if str(ctx.message.author.id) in data: if data[str(ctx.message.author.id)][\"honey_potter\"]: if not str(recipient.id) in data:",
"= json.load(trivia_file) question = trivia_questions[random.randint(0, len(trivia_questions) - 1)] message = \"Question for <@\"",
"* 60 * 24, commands.BucketType.user) async def daily(ctx): discord_id = str(ctx.message.author.id) if discord_id",
"+ str(ctx.message.author.id) + \"> \" + trivia_answers[answer] + \" \" + question[\"answer\"] +",
"= str(ctx.message.author.id) if discord_id not in data: data[discord_id] = {} data[discord_id][\"points\"] = 0",
"await ctx.send(\"Wow! <@\" + str(ctx.message.author.id) + \"> just gave <@\" + str(recipient.id) +",
"print(trivia_answers[answer] + \" \" + question[\"answer\"]) def check(reaction_arg, user_arg): return user_arg == ctx.message.author",
"you don't have permission to do that!\") else: await ctx.send(\"You need to register",
"you have the recipient in the command: `$pot <recipient>`') @bot.command() async def trivia(ctx):",
"\" \" + question[\"choices\"][unicode_answers.index(str(reaction))] + \" was incorrect... The correct answer was \"",
"math import datetime import os import random trivia_answers = [\":regional_indicator_a:\", \":regional_indicator_b:\", \":regional_indicator_c:\", \":regional_indicator_d:\"]",
"bot.wait_for('reaction_add', timeout=60.0, check=check) except asyncio.TimeoutError: await ctx.send(\"Trivia question for <@\" + str(ctx.message.author.id) +",
"to see a list of commands.\") else: raise error @bot.command() @commands.cooldown(1, 60 *",
"@bot.command() @commands.cooldown(1, 60 * 60 * 24, commands.BucketType.user) async def daily(ctx): discord_id =",
"- 1)] message = \"Question for <@\" + str(ctx.message.author.id) + \">:\\n\" + question[\"question\"]",
"open(\"../trivia/\" + random.choice(os.listdir(\"../trivia\")), encoding=\"utf-8\") as trivia_file: trivia_questions = json.load(trivia_file) question = trivia_questions[random.randint(0, len(trivia_questions)",
"import os import random trivia_answers = [\":regional_indicator_a:\", \":regional_indicator_b:\", \":regional_indicator_c:\", \":regional_indicator_d:\"] unicode_max_answers = ['🇦',",
"data: await ctx.send(\"The person you're giving a :honey_pot: to hasn't registered yet! Tell",
"discord.ext import commands import json import math import datetime import os import random",
"+ \"> just gave <@\" + str(recipient.id) + \"> a :honey_pot:! (worth 100",
"1 :chocolate_bar: (equivalent to 10 :candy:)\") _save() @monthly.error async def monthly_error(ctx, error): if",
"<@\" + str(ctx.message.author.id) + \"> timed out.\") return if unicode_answers.index(str(reaction)) == answer: data[discord_id][\"points\"]",
"discord_id in data: await ctx.send(\"You have \" + str(data[discord_id][\"points\"]) + \" :candy:\") else:",
"`$register`\") @bot.command() async def register(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data:",
"100 :candy:)\") else: await ctx.send(\"Hey, you don't have permission to do that!\") else:",
"you're giving a :honey_pot: to hasn't registered yet! Tell them to do `$register`\")",
"if discord_id in data: await ctx.send(\"You have \" + str(data[discord_id][\"points\"]) + \" :candy:\")",
"+ \"\\n\\n\" unicode_answers = unicode_max_answers[:len(question[\"choices\"])] answer = question[\"choices\"].index(question[\"answer\"]) for i in range(len(question[\"choices\"])): message",
"def trivia(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: await ctx.send(\"You need",
"have the recipient in the command: `$pot <recipient>`') @bot.command() async def trivia(ctx): discord_id",
"str(ctx.message.author.id) if discord_id not in data: data[discord_id] = {} data[discord_id][\"points\"] = 0 data[discord_id][\"honey_potter\"]",
"\" + question[\"choices\"][i] + \"\\n\" sent = await ctx.send(message) for c in unicode_answers:",
"the command: `$pot <recipient>`') @bot.command() async def trivia(ctx): discord_id = str(ctx.message.author.id) if discord_id",
"range(len(question[\"choices\"])): message += trivia_answers[i] + \" \" + question[\"choices\"][i] + \"\\n\" sent =",
"answer: data[discord_id][\"points\"] += 5 await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" + trivia_answers[answer]",
"ctx.send(\"Hey, you don't have permission to do that!\") else: await ctx.send(\"You need to",
"os import random trivia_answers = [\":regional_indicator_a:\", \":regional_indicator_b:\", \":regional_indicator_c:\", \":regional_indicator_d:\"] unicode_max_answers = ['🇦', '🇧',",
"await ctx.send(\"You have \" + str(data[discord_id][\"points\"]) + \" :candy:\") else: await ctx.send(\"You need",
"monthly.reset_cooldown(ctx) else: await ctx.send(\"You are already registered!\") def _save(): with open('../data.json', 'w+') as",
"in data: await ctx.send(\"You have \" + str(data[discord_id][\"points\"]) + \" :candy:\") else: await",
"@bot.event async def on_command_error(ctx, error): if isinstance(error, commands.CommandNotFound): await ctx.send(\"That wasn't a valid",
"Do `$register`\") return with open(\"../trivia/\" + random.choice(os.listdir(\"../trivia\")), encoding=\"utf-8\") as trivia_file: trivia_questions = json.load(trivia_file)",
"except asyncio.TimeoutError: await ctx.send(\"Trivia question for <@\" + str(ctx.message.author.id) + \"> timed out.\")",
"ctx.send(\"You've been registered!\") _save() daily.reset_cooldown(ctx) monthly.reset_cooldown(ctx) else: await ctx.send(\"You are already registered!\") def",
"isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your daily reward! Try again in '",
"+ str(reaction) + \" \" + question[\"choices\"][unicode_answers.index(str(reaction))] + \" was incorrect... The correct",
"that!\") else: await ctx.send(\"You need to register first! Do `$register`\") @pot.error async def",
"commands.CommandOnCooldown): await ctx.send('You already claimed your monthly reward! Try again in ' +",
"ctx.send('You already claimed your daily reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else:",
"* 24 * 30, commands.BucketType.user) async def monthly(ctx): discord_id = str(ctx.message.author.id) if discord_id",
"\"> timed out.\") return if unicode_answers.index(str(reaction)) == answer: data[discord_id][\"points\"] += 5 await ctx.send(\"<@\"",
"user_arg == ctx.message.author try: reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check) except asyncio.TimeoutError:",
"def daily(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: await ctx.send(\"You need",
"<@\" + str(ctx.message.author.id) + \"> just gave <@\" + str(recipient.id) + \"> a",
"async def daily_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your daily",
"60 * 60 * 24 * 30, commands.BucketType.user) async def monthly(ctx): discord_id =",
"+ trivia_answers[answer] + \" \" + question[\"answer\"] + \" was correct! You received",
"def monthly_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your monthly reward!",
"ctx.send(\"That wasn't a valid command!\\nType `$help` to see a list of commands.\") else:",
"str(ctx.message.author.id) + \"> timed out.\") return if unicode_answers.index(str(reaction)) == answer: data[discord_id][\"points\"] += 5",
"= await bot.wait_for('reaction_add', timeout=60.0, check=check) except asyncio.TimeoutError: await ctx.send(\"Trivia question for <@\" +",
"async def on_ready(): print('Logged on as ' + str(bot.user)) @bot.event async def on_command_error(ctx,",
"to do `$register`\") return if ctx.message.author.id == recipient.id: await ctx.send(\"You can't pot yourself...",
"claimed your daily reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error",
"_save() daily.reset_cooldown(ctx) monthly.reset_cooldown(ctx) else: await ctx.send(\"You are already registered!\") def _save(): with open('../data.json',",
"error @bot.command() @commands.cooldown(1, 60 * 60 * 24, commands.BucketType.user) async def daily(ctx): discord_id",
"data[discord_id][\"honey_potter\"] = False await ctx.send(\"You've been registered!\") _save() daily.reset_cooldown(ctx) monthly.reset_cooldown(ctx) else: await ctx.send(\"You",
"len(trivia_questions) - 1)] message = \"Question for <@\" + str(ctx.message.author.id) + \">:\\n\" +",
"in range(len(question[\"choices\"])): message += trivia_answers[i] + \" \" + question[\"choices\"][i] + \"\\n\" sent",
"data[discord_id][\"points\"] += 5 await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" + trivia_answers[answer] +",
"monthly_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your monthly reward! Try",
"Do `$register`\") else: data[discord_id][\"points\"] += 10 await ctx.send(\"You got 1 :chocolate_bar: (equivalent to",
"them to do `$register`\") return if ctx.message.author.id == recipient.id: await ctx.send(\"You can't pot",
"to register first! Do `$register`\") else: data[discord_id][\"points\"] += 10 await ctx.send(\"You got 1",
"question[\"choices\"].index(question[\"answer\"]) for i in range(len(question[\"choices\"])): message += trivia_answers[i] + \" \" + question[\"choices\"][i]",
"if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your daily reward! Try again in",
"async def monthly(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: await ctx.send(\"You",
"10 await ctx.send(\"You got 1 :chocolate_bar: (equivalent to 10 :candy:)\") _save() @monthly.error async",
"are already registered!\") def _save(): with open('../data.json', 'w+') as file_save: json.dump(data, file_save) with",
"command!\\nType `$help` to see a list of commands.\") else: raise error @bot.command() @commands.cooldown(1,",
"+ \"> \" + str(reaction) + \" \" + question[\"choices\"][unicode_answers.index(str(reaction))] + \" was",
"\" :candy:\") else: await ctx.send(\"You need to register first! Do `$register`\") @bot.command() async",
"data[str(ctx.message.author.id)][\"honey_potter\"]: if not str(recipient.id) in data: await ctx.send(\"The person you're giving a :honey_pot:",
"ctx.send(\"You got 1 :candy:\") _save() @daily.error async def daily_error(ctx, error): if isinstance(error, commands.CommandOnCooldown):",
"+ question[\"answer\"] + \" was correct! You received 1 :lollipop: (equivalent to 5",
"in data: await ctx.send(\"You need to register first! Do `$register`\") return with open(\"../trivia/\"",
"in data: data[discord_id] = {} data[discord_id][\"points\"] = 0 data[discord_id][\"honey_potter\"] = False await ctx.send(\"You've",
"else: raise error @bot.command() @commands.cooldown(1, 60 * 60 * 24 * 30, commands.BucketType.user)",
"sure you have the recipient in the command: `$pot <recipient>`') @bot.command() async def",
"ctx.send(\"You have \" + str(data[discord_id][\"points\"]) + \" :candy:\") else: await ctx.send(\"You need to",
"trivia_questions = json.load(trivia_file) question = trivia_questions[random.randint(0, len(trivia_questions) - 1)] message = \"Question for",
"asyncio.TimeoutError: await ctx.send(\"Trivia question for <@\" + str(ctx.message.author.id) + \"> timed out.\") return",
"with open('../data.json', 'w+') as file_save: json.dump(data, file_save) with open('../secret.txt', 'r') as f: bot.run(f.readline())",
"await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" + trivia_answers[answer] + \" \" +",
"data: await ctx.send(\"You need to register first! Do `$register`\") return with open(\"../trivia/\" +",
"not in data: await ctx.send(\"You need to register first! Do `$register`\") return with",
"registered!\") def _save(): with open('../data.json', 'w+') as file_save: json.dump(data, file_save) with open('../secret.txt', 'r')",
"as ' + str(bot.user)) @bot.event async def on_command_error(ctx, error): if isinstance(error, commands.CommandNotFound): await",
"check(reaction_arg, user_arg): return user_arg == ctx.message.author try: reaction, user = await bot.wait_for('reaction_add', timeout=60.0,",
"monthly reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() async",
"ctx.send(\"You need to register first! Do `$register`\") @bot.command() async def register(ctx): discord_id =",
"\" + str(reaction) + \" \" + question[\"choices\"][unicode_answers.index(str(reaction))] + \" was incorrect... The",
"discord_id = str(ctx.message.author.id) if discord_id not in data: data[discord_id] = {} data[discord_id][\"points\"] =",
"await bot.wait_for('reaction_add', timeout=60.0, check=check) except asyncio.TimeoutError: await ctx.send(\"Trivia question for <@\" + str(ctx.message.author.id)",
"+ str(ctx.message.author.id) + \"> timed out.\") return if unicode_answers.index(str(reaction)) == answer: data[discord_id][\"points\"] +=",
":lollipop: (equivalent to 5 :candy:)\") _save() else: data[discord_id][\"points\"] -= 1 await ctx.send(\"<@\" +",
"def on_ready(): print('Logged on as ' + str(bot.user)) @bot.event async def on_command_error(ctx, error):",
"@bot.command() async def trivia(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: await",
"def pot_error(ctx, error): await ctx.send('Make sure you have the recipient in the command:",
"yet! Tell them to do `$register`\") return if ctx.message.author.id == recipient.id: await ctx.send(\"You",
"\"> \" + trivia_answers[answer] + \" \" + question[\"answer\"] + \" was correct!",
"(equivalent to 10 :candy:)\") _save() @monthly.error async def monthly_error(ctx, error): if isinstance(error, commands.CommandOnCooldown):",
"_save() await ctx.send(\"Wow! <@\" + str(ctx.message.author.id) + \"> just gave <@\" + str(recipient.id)",
"100 _save() await ctx.send(\"Wow! <@\" + str(ctx.message.author.id) + \"> just gave <@\" +",
"+ str(data[discord_id][\"points\"]) + \" :candy:\") else: await ctx.send(\"You need to register first! Do",
"first! Do `$register`\") @pot.error async def pot_error(ctx, error): await ctx.send('Make sure you have",
"async def monthly_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your monthly",
"data = json.load(data_file) @bot.event async def on_ready(): print('Logged on as ' + str(bot.user))",
"data: await ctx.send(\"You need to register first! Do `$register`\") else: data[discord_id][\"points\"] += 1",
"def on_command_error(ctx, error): if isinstance(error, commands.CommandNotFound): await ctx.send(\"That wasn't a valid command!\\nType `$help`",
"was correct! You received 1 :lollipop: (equivalent to 5 :candy:)\") _save() else: data[discord_id][\"points\"]",
"for <@\" + str(ctx.message.author.id) + \">:\\n\" + question[\"question\"] + \"\\n\\n\" unicode_answers = unicode_max_answers[:len(question[\"choices\"])]",
"+= trivia_answers[i] + \" \" + question[\"choices\"][i] + \"\\n\" sent = await ctx.send(message)",
"data[discord_id][\"points\"] += 1 await ctx.send(\"You got 1 :candy:\") _save() @daily.error async def daily_error(ctx,",
"ctx.send(\"The person you're giving a :honey_pot: to hasn't registered yet! Tell them to",
"await ctx.send(\"You need to register first! Do `$register`\") @bot.command() async def register(ctx): discord_id",
"user = await bot.wait_for('reaction_add', timeout=60.0, check=check) except asyncio.TimeoutError: await ctx.send(\"Trivia question for <@\"",
"error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your monthly reward! Try again",
"@bot.command() async def pot(ctx, recipient: discord.Member): if str(ctx.message.author.id) in data: if data[str(ctx.message.author.id)][\"honey_potter\"]: if",
"+ question[\"answer\"] + \". You lost 1 :candy:\") _save() @bot.command() async def bal(ctx):",
"24, commands.BucketType.user) async def daily(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data:",
"correct answer was \" + trivia_answers[answer] + \" \" + question[\"answer\"] + \".",
"ctx.message.author.id == recipient.id: await ctx.send(\"You can't pot yourself... :unamused:\") return data[str(recipient.id)][\"points\"] += 100",
"+ question[\"question\"] + \"\\n\\n\" unicode_answers = unicode_max_answers[:len(question[\"choices\"])] answer = question[\"choices\"].index(question[\"answer\"]) for i in",
"0 data[discord_id][\"honey_potter\"] = False await ctx.send(\"You've been registered!\") _save() daily.reset_cooldown(ctx) monthly.reset_cooldown(ctx) else: await",
"+ str(ctx.message.author.id) + \"> just gave <@\" + str(recipient.id) + \"> a :honey_pot:!",
"str(ctx.message.author.id) if discord_id not in data: await ctx.send(\"You need to register first! Do",
"not str(recipient.id) in data: await ctx.send(\"The person you're giving a :honey_pot: to hasn't",
"await ctx.send('You already claimed your daily reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after))))",
"register first! Do `$register`\") else: data[discord_id][\"points\"] += 10 await ctx.send(\"You got 1 :chocolate_bar:",
"\" \" + question[\"choices\"][i] + \"\\n\" sent = await ctx.send(message) for c in",
"for <@\" + str(ctx.message.author.id) + \"> timed out.\") return if unicode_answers.index(str(reaction)) == answer:",
"+ \"> timed out.\") return if unicode_answers.index(str(reaction)) == answer: data[discord_id][\"points\"] += 5 await",
"data[discord_id][\"points\"] += 10 await ctx.send(\"You got 1 :chocolate_bar: (equivalent to 10 :candy:)\") _save()",
"trivia_answers[answer] + \" \" + question[\"answer\"] + \" was correct! You received 1",
"_save() else: data[discord_id][\"points\"] -= 1 await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" +",
"str(ctx.message.author.id) + \"> \" + str(reaction) + \" \" + question[\"choices\"][unicode_answers.index(str(reaction))] + \"",
"to register first! Do `$register`\") @bot.command() async def register(ctx): discord_id = str(ctx.message.author.id) if",
"data[str(recipient.id)][\"points\"] += 100 _save() await ctx.send(\"Wow! <@\" + str(ctx.message.author.id) + \"> just gave",
"trivia_answers = [\":regional_indicator_a:\", \":regional_indicator_b:\", \":regional_indicator_c:\", \":regional_indicator_d:\"] unicode_max_answers = ['🇦', '🇧', '🇨', '🇩'] bot",
"error): if isinstance(error, commands.CommandNotFound): await ctx.send(\"That wasn't a valid command!\\nType `$help` to see",
"got 1 :candy:\") _save() @daily.error async def daily_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await",
"= 0 data[discord_id][\"honey_potter\"] = False await ctx.send(\"You've been registered!\") _save() daily.reset_cooldown(ctx) monthly.reset_cooldown(ctx) else:",
"question[\"answer\"]) def check(reaction_arg, user_arg): return user_arg == ctx.message.author try: reaction, user = await",
"`$register`\") else: data[discord_id][\"points\"] += 10 await ctx.send(\"You got 1 :chocolate_bar: (equivalent to 10",
"in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() @commands.cooldown(1, 60 * 60 *",
"' + str(bot.user)) @bot.event async def on_command_error(ctx, error): if isinstance(error, commands.CommandNotFound): await ctx.send(\"That",
"await ctx.send(\"Trivia question for <@\" + str(ctx.message.author.id) + \"> timed out.\") return if",
":candy:)\") _save() else: data[discord_id][\"points\"] -= 1 await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \"",
":candy:)\") _save() @monthly.error async def monthly_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already",
"not in data: data[discord_id] = {} data[discord_id][\"points\"] = 0 data[discord_id][\"honey_potter\"] = False await",
"reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() @commands.cooldown(1, 60",
"' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() async def pot(ctx, recipient: discord.Member): if",
"question[\"question\"] + \"\\n\\n\" unicode_answers = unicode_max_answers[:len(question[\"choices\"])] answer = question[\"choices\"].index(question[\"answer\"]) for i in range(len(question[\"choices\"])):",
"to register first! Do `$register`\") return with open(\"../trivia/\" + random.choice(os.listdir(\"../trivia\")), encoding=\"utf-8\") as trivia_file:",
"data[discord_id] = {} data[discord_id][\"points\"] = 0 data[discord_id][\"honey_potter\"] = False await ctx.send(\"You've been registered!\")",
"\". You lost 1 :candy:\") _save() @bot.command() async def bal(ctx): discord_id = str(ctx.message.author.id)",
"daily.reset_cooldown(ctx) monthly.reset_cooldown(ctx) else: await ctx.send(\"You are already registered!\") def _save(): with open('../data.json', 'w+')",
"ctx.message.author try: reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check) except asyncio.TimeoutError: await ctx.send(\"Trivia",
"= ['🇦', '🇧', '🇨', '🇩'] bot = commands.Bot(command_prefix='$') with open(\"../data.json\") as data_file: data",
":candy:\") else: await ctx.send(\"You need to register first! Do `$register`\") @bot.command() async def",
"incorrect... The correct answer was \" + trivia_answers[answer] + \" \" + question[\"answer\"]",
"commands.BucketType.user) async def monthly(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: await",
"await ctx.send(\"That wasn't a valid command!\\nType `$help` to see a list of commands.\")",
"recipient.id: await ctx.send(\"You can't pot yourself... :unamused:\") return data[str(recipient.id)][\"points\"] += 100 _save() await",
"to do that!\") else: await ctx.send(\"You need to register first! Do `$register`\") @pot.error",
"await ctx.send('Make sure you have the recipient in the command: `$pot <recipient>`') @bot.command()",
"* 30, commands.BucketType.user) async def monthly(ctx): discord_id = str(ctx.message.author.id) if discord_id not in",
"trivia_file: trivia_questions = json.load(trivia_file) question = trivia_questions[random.randint(0, len(trivia_questions) - 1)] message = \"Question",
"1 :candy:\") _save() @daily.error async def daily_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You",
"asyncio import discord from discord.ext import commands import json import math import datetime",
"again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() @commands.cooldown(1, 60 * 60",
"You lost 1 :candy:\") _save() @bot.command() async def bal(ctx): discord_id = str(ctx.message.author.id) if",
"str(reaction) + \" \" + question[\"choices\"][unicode_answers.index(str(reaction))] + \" was incorrect... The correct answer",
"str(data[discord_id][\"points\"]) + \" :candy:\") else: await ctx.send(\"You need to register first! Do `$register`\")",
"data[discord_id][\"points\"] -= 1 await ctx.send(\"<@\" + str(ctx.message.author.id) + \"> \" + str(reaction) +",
"@daily.error async def daily_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your",
"str(recipient.id) + \"> a :honey_pot:! (worth 100 :candy:)\") else: await ctx.send(\"Hey, you don't",
"import math import datetime import os import random trivia_answers = [\":regional_indicator_a:\", \":regional_indicator_b:\", \":regional_indicator_c:\",",
"for c in unicode_answers: await sent.add_reaction(c) print(trivia_answers[answer] + \" \" + question[\"answer\"]) def",
"\" + question[\"answer\"] + \" was correct! You received 1 :lollipop: (equivalent to",
":unamused:\") return data[str(recipient.id)][\"points\"] += 100 _save() await ctx.send(\"Wow! <@\" + str(ctx.message.author.id) + \">",
"def _save(): with open('../data.json', 'w+') as file_save: json.dump(data, file_save) with open('../secret.txt', 'r') as",
"`$pot <recipient>`') @bot.command() async def trivia(ctx): discord_id = str(ctx.message.author.id) if discord_id not in",
"random trivia_answers = [\":regional_indicator_a:\", \":regional_indicator_b:\", \":regional_indicator_c:\", \":regional_indicator_d:\"] unicode_max_answers = ['🇦', '🇧', '🇨', '🇩']",
"already claimed your monthly reward! Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise",
"@monthly.error async def monthly_error(ctx, error): if isinstance(error, commands.CommandOnCooldown): await ctx.send('You already claimed your",
"return user_arg == ctx.message.author try: reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check) except",
"first! Do `$register`\") @bot.command() async def register(ctx): discord_id = str(ctx.message.author.id) if discord_id not",
"registered!\") _save() daily.reset_cooldown(ctx) monthly.reset_cooldown(ctx) else: await ctx.send(\"You are already registered!\") def _save(): with",
"\" was correct! You received 1 :lollipop: (equivalent to 5 :candy:)\") _save() else:",
"of commands.\") else: raise error @bot.command() @commands.cooldown(1, 60 * 60 * 24, commands.BucketType.user)",
"str(recipient.id) in data: await ctx.send(\"The person you're giving a :honey_pot: to hasn't registered",
"need to register first! Do `$register`\") @bot.command() async def register(ctx): discord_id = str(ctx.message.author.id)",
"\">:\\n\" + question[\"question\"] + \"\\n\\n\" unicode_answers = unicode_max_answers[:len(question[\"choices\"])] answer = question[\"choices\"].index(question[\"answer\"]) for i",
"+ \" \" + question[\"answer\"] + \". You lost 1 :candy:\") _save() @bot.command()",
"on_command_error(ctx, error): if isinstance(error, commands.CommandNotFound): await ctx.send(\"That wasn't a valid command!\\nType `$help` to",
"await ctx.send(\"You are already registered!\") def _save(): with open('../data.json', 'w+') as file_save: json.dump(data,",
"['🇦', '🇧', '🇨', '🇩'] bot = commands.Bot(command_prefix='$') with open(\"../data.json\") as data_file: data =",
"don't have permission to do that!\") else: await ctx.send(\"You need to register first!",
"Try again in ' + str(datetime.timedelta(seconds=math.floor(error.retry_after)))) else: raise error @bot.command() @commands.cooldown(1, 60 *",
"async def trivia(ctx): discord_id = str(ctx.message.author.id) if discord_id not in data: await ctx.send(\"You"
] |
[
"# -*- coding: utf-8 -*- \"\"\" @date: 2020/7/14 下午8:34 @file: processor.py @author: zj",
"output_data = self.parser.process() if verbose: logger.info('Save data in specified format') self.creator.save(output_data) if verbose:",
"data with specified format \"\"\" def __init__(self, cfg): self.parser = build_anno(cfg.ANNO.PARSER, cfg) self.creator",
"from ..anno import build_anno from ..util.logger import setup_logger class Processor(object): \"\"\" The labeled",
"is processed to create training data with specified format \"\"\" def __init__(self, cfg):",
"self.logger if verbose: logger.info('Processing original data') output_data = self.parser.process() if verbose: logger.info('Save data",
"@date: 2020/7/14 下午8:34 @file: processor.py @author: zj @description: \"\"\" from ..anno import build_anno",
"processor.py @author: zj @description: \"\"\" from ..anno import build_anno from ..util.logger import setup_logger",
"specified format \"\"\" def __init__(self, cfg): self.parser = build_anno(cfg.ANNO.PARSER, cfg) self.creator = build_anno(cfg.ANNO.CREATOR,",
"= self.logger if verbose: logger.info('Processing original data') output_data = self.parser.process() if verbose: logger.info('Save",
"cfg) self.creator = build_anno(cfg.ANNO.CREATOR, cfg) self.logger = setup_logger(__name__) self.verbose = cfg.ANNO.VERBOSE def process(self):",
"def __init__(self, cfg): self.parser = build_anno(cfg.ANNO.PARSER, cfg) self.creator = build_anno(cfg.ANNO.CREATOR, cfg) self.logger =",
"logger = self.logger if verbose: logger.info('Processing original data') output_data = self.parser.process() if verbose:",
"setup_logger class Processor(object): \"\"\" The labeled data is processed to create training data",
"original data') output_data = self.parser.process() if verbose: logger.info('Save data in specified format') self.creator.save(output_data)",
"-*- coding: utf-8 -*- \"\"\" @date: 2020/7/14 下午8:34 @file: processor.py @author: zj @description:",
"2020/7/14 下午8:34 @file: processor.py @author: zj @description: \"\"\" from ..anno import build_anno from",
"\"\"\" from ..anno import build_anno from ..util.logger import setup_logger class Processor(object): \"\"\" The",
"import setup_logger class Processor(object): \"\"\" The labeled data is processed to create training",
"build_anno from ..util.logger import setup_logger class Processor(object): \"\"\" The labeled data is processed",
"labeled data is processed to create training data with specified format \"\"\" def",
"@author: zj @description: \"\"\" from ..anno import build_anno from ..util.logger import setup_logger class",
"self.creator = build_anno(cfg.ANNO.CREATOR, cfg) self.logger = setup_logger(__name__) self.verbose = cfg.ANNO.VERBOSE def process(self): verbose",
"self.verbose logger = self.logger if verbose: logger.info('Processing original data') output_data = self.parser.process() if",
"from ..util.logger import setup_logger class Processor(object): \"\"\" The labeled data is processed to",
"data') output_data = self.parser.process() if verbose: logger.info('Save data in specified format') self.creator.save(output_data) if",
"= build_anno(cfg.ANNO.CREATOR, cfg) self.logger = setup_logger(__name__) self.verbose = cfg.ANNO.VERBOSE def process(self): verbose =",
"verbose: logger.info('Processing original data') output_data = self.parser.process() if verbose: logger.info('Save data in specified",
"self.verbose = cfg.ANNO.VERBOSE def process(self): verbose = self.verbose logger = self.logger if verbose:",
"cfg.ANNO.VERBOSE def process(self): verbose = self.verbose logger = self.logger if verbose: logger.info('Processing original",
"create training data with specified format \"\"\" def __init__(self, cfg): self.parser = build_anno(cfg.ANNO.PARSER,",
"..util.logger import setup_logger class Processor(object): \"\"\" The labeled data is processed to create",
"= self.verbose logger = self.logger if verbose: logger.info('Processing original data') output_data = self.parser.process()",
"= cfg.ANNO.VERBOSE def process(self): verbose = self.verbose logger = self.logger if verbose: logger.info('Processing",
"\"\"\" The labeled data is processed to create training data with specified format",
"The labeled data is processed to create training data with specified format \"\"\"",
"utf-8 -*- \"\"\" @date: 2020/7/14 下午8:34 @file: processor.py @author: zj @description: \"\"\" from",
"coding: utf-8 -*- \"\"\" @date: 2020/7/14 下午8:34 @file: processor.py @author: zj @description: \"\"\"",
"processed to create training data with specified format \"\"\" def __init__(self, cfg): self.parser",
"= build_anno(cfg.ANNO.PARSER, cfg) self.creator = build_anno(cfg.ANNO.CREATOR, cfg) self.logger = setup_logger(__name__) self.verbose = cfg.ANNO.VERBOSE",
"data is processed to create training data with specified format \"\"\" def __init__(self,",
"format \"\"\" def __init__(self, cfg): self.parser = build_anno(cfg.ANNO.PARSER, cfg) self.creator = build_anno(cfg.ANNO.CREATOR, cfg)",
"\"\"\" def __init__(self, cfg): self.parser = build_anno(cfg.ANNO.PARSER, cfg) self.creator = build_anno(cfg.ANNO.CREATOR, cfg) self.logger",
"cfg): self.parser = build_anno(cfg.ANNO.PARSER, cfg) self.creator = build_anno(cfg.ANNO.CREATOR, cfg) self.logger = setup_logger(__name__) self.verbose",
"self.parser = build_anno(cfg.ANNO.PARSER, cfg) self.creator = build_anno(cfg.ANNO.CREATOR, cfg) self.logger = setup_logger(__name__) self.verbose =",
"build_anno(cfg.ANNO.CREATOR, cfg) self.logger = setup_logger(__name__) self.verbose = cfg.ANNO.VERBOSE def process(self): verbose = self.verbose",
"to create training data with specified format \"\"\" def __init__(self, cfg): self.parser =",
"process(self): verbose = self.verbose logger = self.logger if verbose: logger.info('Processing original data') output_data",
"class Processor(object): \"\"\" The labeled data is processed to create training data with",
"__init__(self, cfg): self.parser = build_anno(cfg.ANNO.PARSER, cfg) self.creator = build_anno(cfg.ANNO.CREATOR, cfg) self.logger = setup_logger(__name__)",
"cfg) self.logger = setup_logger(__name__) self.verbose = cfg.ANNO.VERBOSE def process(self): verbose = self.verbose logger",
"with specified format \"\"\" def __init__(self, cfg): self.parser = build_anno(cfg.ANNO.PARSER, cfg) self.creator =",
"def process(self): verbose = self.verbose logger = self.logger if verbose: logger.info('Processing original data')",
"下午8:34 @file: processor.py @author: zj @description: \"\"\" from ..anno import build_anno from ..util.logger",
"if verbose: logger.info('Processing original data') output_data = self.parser.process() if verbose: logger.info('Save data in",
"Processor(object): \"\"\" The labeled data is processed to create training data with specified",
"@file: processor.py @author: zj @description: \"\"\" from ..anno import build_anno from ..util.logger import",
"..anno import build_anno from ..util.logger import setup_logger class Processor(object): \"\"\" The labeled data",
"\"\"\" @date: 2020/7/14 下午8:34 @file: processor.py @author: zj @description: \"\"\" from ..anno import",
"verbose = self.verbose logger = self.logger if verbose: logger.info('Processing original data') output_data =",
"import build_anno from ..util.logger import setup_logger class Processor(object): \"\"\" The labeled data is",
"zj @description: \"\"\" from ..anno import build_anno from ..util.logger import setup_logger class Processor(object):",
"@description: \"\"\" from ..anno import build_anno from ..util.logger import setup_logger class Processor(object): \"\"\"",
"build_anno(cfg.ANNO.PARSER, cfg) self.creator = build_anno(cfg.ANNO.CREATOR, cfg) self.logger = setup_logger(__name__) self.verbose = cfg.ANNO.VERBOSE def",
"self.logger = setup_logger(__name__) self.verbose = cfg.ANNO.VERBOSE def process(self): verbose = self.verbose logger =",
"= setup_logger(__name__) self.verbose = cfg.ANNO.VERBOSE def process(self): verbose = self.verbose logger = self.logger",
"-*- \"\"\" @date: 2020/7/14 下午8:34 @file: processor.py @author: zj @description: \"\"\" from ..anno",
"= self.parser.process() if verbose: logger.info('Save data in specified format') self.creator.save(output_data) if verbose: logger.info('Finish!!!')",
"setup_logger(__name__) self.verbose = cfg.ANNO.VERBOSE def process(self): verbose = self.verbose logger = self.logger if",
"logger.info('Processing original data') output_data = self.parser.process() if verbose: logger.info('Save data in specified format')",
"training data with specified format \"\"\" def __init__(self, cfg): self.parser = build_anno(cfg.ANNO.PARSER, cfg)"
] |
[
"= input() set_a = set(a) if len(set_a) % 2 == 0: print('CHAT WITH",
"input() set_a = set(a) if len(set_a) % 2 == 0: print('CHAT WITH HER!')",
"% 2 == 0: print('CHAT WITH HER!') else: print('IGNORE HIM!') if __name__ ==",
"set(a) if len(set_a) % 2 == 0: print('CHAT WITH HER!') else: print('IGNORE HIM!')",
"== 0: print('CHAT WITH HER!') else: print('IGNORE HIM!') if __name__ == \"__main__\": main()",
"= set(a) if len(set_a) % 2 == 0: print('CHAT WITH HER!') else: print('IGNORE",
"2 == 0: print('CHAT WITH HER!') else: print('IGNORE HIM!') if __name__ == \"__main__\":",
"a = input() set_a = set(a) if len(set_a) % 2 == 0: print('CHAT",
"set_a = set(a) if len(set_a) % 2 == 0: print('CHAT WITH HER!') else:",
"if len(set_a) % 2 == 0: print('CHAT WITH HER!') else: print('IGNORE HIM!') if",
"def main(): a = input() set_a = set(a) if len(set_a) % 2 ==",
"main(): a = input() set_a = set(a) if len(set_a) % 2 == 0:",
"len(set_a) % 2 == 0: print('CHAT WITH HER!') else: print('IGNORE HIM!') if __name__"
] |
[
"# pyflakes: disable-all from .api import * from .aug import * from .main",
"disable-all from .api import * from .aug import * from .main import *",
"<gh_stars>1000+ # pyflakes: disable-all from .api import * from .aug import * from",
"pyflakes: disable-all from .api import * from .aug import * from .main import"
] |
[
"# 'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques' # # def delete_with_placeholders(modeladmin, request, queryset): # for obj in queryset:",
"'ModelAdmin' methods # 'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques' # # def delete_with_placeholders(modeladmin, request, queryset): # for obj",
"may be better to use 'ModelAdmin' methods # 'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques' # # def delete_with_placeholders(modeladmin,",
"for obj in queryset: # obj.delete() # # # delete_with_placeholders.short_description = \"Delete Selected",
"potentially not, may be better to use 'ModelAdmin' methods # 'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques' # #",
"better to use 'ModelAdmin' methods # 'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques' # # def delete_with_placeholders(modeladmin, request, queryset):",
"# # def delete_with_placeholders(modeladmin, request, queryset): # for obj in queryset: # obj.delete()",
"methods # 'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques' # # def delete_with_placeholders(modeladmin, request, queryset): # for obj in",
"# for obj in queryset: # obj.delete() # # # delete_with_placeholders.short_description = \"Delete",
"use 'ModelAdmin' methods # 'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques' # # def delete_with_placeholders(modeladmin, request, queryset): # for",
"# def delete_with_placeholders(modeladmin, request, queryset): # for obj in queryset: # obj.delete() #",
"def delete_with_placeholders(modeladmin, request, queryset): # for obj in queryset: # obj.delete() # #",
"delete_with_placeholders(modeladmin, request, queryset): # for obj in queryset: # obj.delete() # # #",
"not, may be better to use 'ModelAdmin' methods # 'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques' # # def",
"# potentially not, may be better to use 'ModelAdmin' methods # 'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques' #",
"'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques' # # def delete_with_placeholders(modeladmin, request, queryset): # for obj in queryset: #",
"obj in queryset: # obj.delete() # # # delete_with_placeholders.short_description = \"Delete Selected Items\"",
"to use 'ModelAdmin' methods # 'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques' # # def delete_with_placeholders(modeladmin, request, queryset): #",
"be better to use 'ModelAdmin' methods # 'https://docs.djangoproject.com/en/2.0/ref/contrib/admin/actions/#advanced-action-techniques' # # def delete_with_placeholders(modeladmin, request,",
"queryset): # for obj in queryset: # obj.delete() # # # delete_with_placeholders.short_description =",
"request, queryset): # for obj in queryset: # obj.delete() # # # delete_with_placeholders.short_description"
] |
[
"self.clear_cache() def tearDown(self): self.clear_cache() def test_get(self): assert config.TEXT_PREVIEW_EXT == settings.TEXT_PREVIEW_EXT orig_preview_ext = settings.TEXT_PREVIEW_EXT",
"BaseTestCase class GetConfTextExtTest(BaseTestCase): def setUp(self): self.clear_cache() def tearDown(self): self.clear_cache() def test_get(self): assert config.TEXT_PREVIEW_EXT",
"constance import config from django.conf import settings from seahub.utils import get_conf_text_ext from seahub.test_utils",
"import settings from seahub.utils import get_conf_text_ext from seahub.test_utils import BaseTestCase class GetConfTextExtTest(BaseTestCase): def",
"seahub.utils import get_conf_text_ext from seahub.test_utils import BaseTestCase class GetConfTextExtTest(BaseTestCase): def setUp(self): self.clear_cache() def",
"def test_get(self): assert config.TEXT_PREVIEW_EXT == settings.TEXT_PREVIEW_EXT orig_preview_ext = settings.TEXT_PREVIEW_EXT config.TEXT_PREVIEW_EXT = orig_preview_ext +",
"setUp(self): self.clear_cache() def tearDown(self): self.clear_cache() def test_get(self): assert config.TEXT_PREVIEW_EXT == settings.TEXT_PREVIEW_EXT orig_preview_ext =",
"seahub.test_utils import BaseTestCase class GetConfTextExtTest(BaseTestCase): def setUp(self): self.clear_cache() def tearDown(self): self.clear_cache() def test_get(self):",
"get_conf_text_ext from seahub.test_utils import BaseTestCase class GetConfTextExtTest(BaseTestCase): def setUp(self): self.clear_cache() def tearDown(self): self.clear_cache()",
"settings from seahub.utils import get_conf_text_ext from seahub.test_utils import BaseTestCase class GetConfTextExtTest(BaseTestCase): def setUp(self):",
"from seahub.utils import get_conf_text_ext from seahub.test_utils import BaseTestCase class GetConfTextExtTest(BaseTestCase): def setUp(self): self.clear_cache()",
"config.TEXT_PREVIEW_EXT == settings.TEXT_PREVIEW_EXT orig_preview_ext = settings.TEXT_PREVIEW_EXT config.TEXT_PREVIEW_EXT = orig_preview_ext + ',az' assert 'az'",
"from constance import config from django.conf import settings from seahub.utils import get_conf_text_ext from",
"assert config.TEXT_PREVIEW_EXT == settings.TEXT_PREVIEW_EXT orig_preview_ext = settings.TEXT_PREVIEW_EXT config.TEXT_PREVIEW_EXT = orig_preview_ext + ',az' assert",
"django.conf import settings from seahub.utils import get_conf_text_ext from seahub.test_utils import BaseTestCase class GetConfTextExtTest(BaseTestCase):",
"import config from django.conf import settings from seahub.utils import get_conf_text_ext from seahub.test_utils import",
"class GetConfTextExtTest(BaseTestCase): def setUp(self): self.clear_cache() def tearDown(self): self.clear_cache() def test_get(self): assert config.TEXT_PREVIEW_EXT ==",
"def tearDown(self): self.clear_cache() def test_get(self): assert config.TEXT_PREVIEW_EXT == settings.TEXT_PREVIEW_EXT orig_preview_ext = settings.TEXT_PREVIEW_EXT config.TEXT_PREVIEW_EXT",
"config from django.conf import settings from seahub.utils import get_conf_text_ext from seahub.test_utils import BaseTestCase",
"== settings.TEXT_PREVIEW_EXT orig_preview_ext = settings.TEXT_PREVIEW_EXT config.TEXT_PREVIEW_EXT = orig_preview_ext + ',az' assert 'az' in",
"def setUp(self): self.clear_cache() def tearDown(self): self.clear_cache() def test_get(self): assert config.TEXT_PREVIEW_EXT == settings.TEXT_PREVIEW_EXT orig_preview_ext",
"from django.conf import settings from seahub.utils import get_conf_text_ext from seahub.test_utils import BaseTestCase class",
"settings.TEXT_PREVIEW_EXT orig_preview_ext = settings.TEXT_PREVIEW_EXT config.TEXT_PREVIEW_EXT = orig_preview_ext + ',az' assert 'az' in get_conf_text_ext()",
"<gh_stars>0 from constance import config from django.conf import settings from seahub.utils import get_conf_text_ext",
"import BaseTestCase class GetConfTextExtTest(BaseTestCase): def setUp(self): self.clear_cache() def tearDown(self): self.clear_cache() def test_get(self): assert",
"from seahub.test_utils import BaseTestCase class GetConfTextExtTest(BaseTestCase): def setUp(self): self.clear_cache() def tearDown(self): self.clear_cache() def",
"test_get(self): assert config.TEXT_PREVIEW_EXT == settings.TEXT_PREVIEW_EXT orig_preview_ext = settings.TEXT_PREVIEW_EXT config.TEXT_PREVIEW_EXT = orig_preview_ext + ',az'",
"tearDown(self): self.clear_cache() def test_get(self): assert config.TEXT_PREVIEW_EXT == settings.TEXT_PREVIEW_EXT orig_preview_ext = settings.TEXT_PREVIEW_EXT config.TEXT_PREVIEW_EXT =",
"self.clear_cache() def test_get(self): assert config.TEXT_PREVIEW_EXT == settings.TEXT_PREVIEW_EXT orig_preview_ext = settings.TEXT_PREVIEW_EXT config.TEXT_PREVIEW_EXT = orig_preview_ext",
"import get_conf_text_ext from seahub.test_utils import BaseTestCase class GetConfTextExtTest(BaseTestCase): def setUp(self): self.clear_cache() def tearDown(self):",
"GetConfTextExtTest(BaseTestCase): def setUp(self): self.clear_cache() def tearDown(self): self.clear_cache() def test_get(self): assert config.TEXT_PREVIEW_EXT == settings.TEXT_PREVIEW_EXT"
] |
[
">>= 1; } *output = vec; } ''' # Get device and context,",
"log:') print(prog.get_build_info(dev, cl.program_build_info.LOG)) raise # Create output buffer out = cl.array.vec.zeros_int4() buffer_out =",
"= 0xFFFFFFFF */ if(vec.s2 == 7){ vec &= (int4)(-1, -1, 0, -1); }",
"|| vec.s1 < 16)){ vec.s3 >>= 1; } *output = vec; } '''",
"vec.s1 < 16)){ vec.s3 >>= 1; } *output = vec; } ''' #",
"= cl.array.vec.zeros_int4() buffer_out = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize) # Enqueue kernel (with argument specified",
"prog.build(options=['-Werror'], devices=[dev]) except: print('Build log:') print(prog.get_build_info(dev, cl.program_build_info.LOG)) raise # Create output buffer out",
"vec.s01 = vec.s23 < 7; /* Divides the last element by 2 until",
"program in the specified context using the kernel source code prog = cl.Program(context,",
"cl.mem_flags.WRITE_ONLY, size=out.itemsize) # Enqueue kernel (with argument specified directly) n_globals = (1,) n_locals",
"kernel (with argument specified directly) n_globals = (1,) n_locals = None prog.op_test(queue, n_globals,",
"to every element of vec */ vec += 4; /* Sets the third",
"vec += 4; /* Sets the third element to 0 Doesn't change the",
"directly) n_globals = (1,) n_locals = None prog.op_test(queue, n_globals, n_locals, buffer_out) # Enqueue",
"to 7 */ while(vec.s3 > 7 && (vec.s0 < 16 || vec.s1 <",
"Get device and context, create command queue and program dev = utility.get_default_device() context",
"(-1 in hexadecimal = 0xFFFFFFFF */ if(vec.s2 == 7){ vec &= (int4)(-1, -1,",
"-1, 0, -1); } /* Sets the first element to -1, the second",
"and context, create command queue and program dev = utility.get_default_device() context = cl.Context(devices=[dev])",
"''' # Get device and context, create command queue and program dev =",
"# Create output buffer out = cl.array.vec.zeros_int4() buffer_out = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize) #",
"= (int4)(1, 2, 3, 4); /* Adds 4 to every element of vec",
"buffer_out = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize) # Enqueue kernel (with argument specified directly) n_globals",
"< 16)){ vec.s3 >>= 1; } *output = vec; } ''' # Get",
"= cl.CommandQueue(context, dev) # Build program in the specified context using the kernel",
"usage (and vector usage) ''' import pyopencl as cl import pyopencl.array import utility",
"4; /* Sets the third element to 0 Doesn't change the other elements",
"-1); } /* Sets the first element to -1, the second to 0",
"it is less than or equal to 7 */ while(vec.s3 > 7 &&",
"while(vec.s3 > 7 && (vec.s0 < 16 || vec.s1 < 16)){ vec.s3 >>=",
"queue = cl.CommandQueue(context, dev) # Build program in the specified context using the",
"print(prog.get_build_info(dev, cl.program_build_info.LOG)) raise # Create output buffer out = cl.array.vec.zeros_int4() buffer_out = cl.Buffer(context,",
"Listing 5.1: Operator usage (and vector usage) ''' import pyopencl as cl import",
"the kernel source code prog = cl.Program(context, kernel_src) try: prog.build(options=['-Werror'], devices=[dev]) except: print('Build",
"(with argument specified directly) n_globals = (1,) n_locals = None prog.op_test(queue, n_globals, n_locals,",
"pyopencl as cl import pyopencl.array import utility kernel_src = ''' __kernel void op_test(__global",
"2, 3, 4); /* Adds 4 to every element of vec */ vec",
"7 */ while(vec.s3 > 7 && (vec.s0 < 16 || vec.s1 < 16)){",
"> 7 && (vec.s0 < 16 || vec.s1 < 16)){ vec.s3 >>= 1;",
"(int4)(-1, -1, 0, -1); } /* Sets the first element to -1, the",
"third element to 0 Doesn't change the other elements (-1 in hexadecimal =",
"code prog = cl.Program(context, kernel_src) try: prog.build(options=['-Werror'], devices=[dev]) except: print('Build log:') print(prog.get_build_info(dev, cl.program_build_info.LOG))",
"__kernel void op_test(__global int4 *output) { int4 vec = (int4)(1, 2, 3, 4);",
"int4 vec = (int4)(1, 2, 3, 4); /* Adds 4 to every element",
"in hexadecimal = 0xFFFFFFFF */ if(vec.s2 == 7){ vec &= (int4)(-1, -1, 0,",
"16)){ vec.s3 >>= 1; } *output = vec; } ''' # Get device",
"7; /* Divides the last element by 2 until it is less than",
"specified directly) n_globals = (1,) n_locals = None prog.op_test(queue, n_globals, n_locals, buffer_out) #",
"element to 0 Doesn't change the other elements (-1 in hexadecimal = 0xFFFFFFFF",
"# Enqueue kernel (with argument specified directly) n_globals = (1,) n_locals = None",
"to 0 */ vec.s01 = vec.s23 < 7; /* Divides the last element",
"0 */ vec.s01 = vec.s23 < 7; /* Divides the last element by",
"element by 2 until it is less than or equal to 7 */",
"prog.op_test(queue, n_globals, n_locals, buffer_out) # Enqueue command to copy from buffer_out to host",
"/* Divides the last element by 2 until it is less than or",
"command queue and program dev = utility.get_default_device() context = cl.Context(devices=[dev]) queue = cl.CommandQueue(context,",
"Divides the last element by 2 until it is less than or equal",
"program dev = utility.get_default_device() context = cl.Context(devices=[dev]) queue = cl.CommandQueue(context, dev) # Build",
"pyopencl.array import utility kernel_src = ''' __kernel void op_test(__global int4 *output) { int4",
"0, -1); } /* Sets the first element to -1, the second to",
"buffer out = cl.array.vec.zeros_int4() buffer_out = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize) # Enqueue kernel (with",
"None prog.op_test(queue, n_globals, n_locals, buffer_out) # Enqueue command to copy from buffer_out to",
"prog = cl.Program(context, kernel_src) try: prog.build(options=['-Werror'], devices=[dev]) except: print('Build log:') print(prog.get_build_info(dev, cl.program_build_info.LOG)) raise",
"vec &= (int4)(-1, -1, 0, -1); } /* Sets the first element to",
"cl import pyopencl.array import utility kernel_src = ''' __kernel void op_test(__global int4 *output)",
"= vec.s23 < 7; /* Divides the last element by 2 until it",
"Operator usage (and vector usage) ''' import pyopencl as cl import pyopencl.array import",
"int4 *output) { int4 vec = (int4)(1, 2, 3, 4); /* Adds 4",
"} ''' # Get device and context, create command queue and program dev",
"1; } *output = vec; } ''' # Get device and context, create",
"size=out.itemsize) # Enqueue kernel (with argument specified directly) n_globals = (1,) n_locals =",
"*output = vec; } ''' # Get device and context, create command queue",
"(1,) n_locals = None prog.op_test(queue, n_globals, n_locals, buffer_out) # Enqueue command to copy",
"kernel_src = ''' __kernel void op_test(__global int4 *output) { int4 vec = (int4)(1,",
"vec */ vec += 4; /* Sets the third element to 0 Doesn't",
"(and vector usage) ''' import pyopencl as cl import pyopencl.array import utility kernel_src",
"the third element to 0 Doesn't change the other elements (-1 in hexadecimal",
"of vec */ vec += 4; /* Sets the third element to 0",
"= vec; } ''' # Get device and context, create command queue and",
"other elements (-1 in hexadecimal = 0xFFFFFFFF */ if(vec.s2 == 7){ vec &=",
"the specified context using the kernel source code prog = cl.Program(context, kernel_src) try:",
"# Build program in the specified context using the kernel source code prog",
"usage) ''' import pyopencl as cl import pyopencl.array import utility kernel_src = '''",
"''' __kernel void op_test(__global int4 *output) { int4 vec = (int4)(1, 2, 3,",
"16 || vec.s1 < 16)){ vec.s3 >>= 1; } *output = vec; }",
"than or equal to 7 */ while(vec.s3 > 7 && (vec.s0 < 16",
"devices=[dev]) except: print('Build log:') print(prog.get_build_info(dev, cl.program_build_info.LOG)) raise # Create output buffer out =",
"copy from buffer_out to host memory cl.enqueue_copy(queue, dest=out, src=buffer_out, is_blocking=True) print('Output: ' +",
"output buffer out = cl.array.vec.zeros_int4() buffer_out = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize) # Enqueue kernel",
"elements (-1 in hexadecimal = 0xFFFFFFFF */ if(vec.s2 == 7){ vec &= (int4)(-1,",
"} *output = vec; } ''' # Get device and context, create command",
"raise # Create output buffer out = cl.array.vec.zeros_int4() buffer_out = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize)",
"to copy from buffer_out to host memory cl.enqueue_copy(queue, dest=out, src=buffer_out, is_blocking=True) print('Output: '",
"0xFFFFFFFF */ if(vec.s2 == 7){ vec &= (int4)(-1, -1, 0, -1); } /*",
"< 16 || vec.s1 < 16)){ vec.s3 >>= 1; } *output = vec;",
"''' Listing 5.1: Operator usage (and vector usage) ''' import pyopencl as cl",
"less than or equal to 7 */ while(vec.s3 > 7 && (vec.s0 <",
"+= 4; /* Sets the third element to 0 Doesn't change the other",
"== 7){ vec &= (int4)(-1, -1, 0, -1); } /* Sets the first",
"} /* Sets the first element to -1, the second to 0 */",
"*/ while(vec.s3 > 7 && (vec.s0 < 16 || vec.s1 < 16)){ vec.s3",
"queue and program dev = utility.get_default_device() context = cl.Context(devices=[dev]) queue = cl.CommandQueue(context, dev)",
"/* Sets the first element to -1, the second to 0 */ vec.s01",
"equal to 7 */ while(vec.s3 > 7 && (vec.s0 < 16 || vec.s1",
"print('Build log:') print(prog.get_build_info(dev, cl.program_build_info.LOG)) raise # Create output buffer out = cl.array.vec.zeros_int4() buffer_out",
"hexadecimal = 0xFFFFFFFF */ if(vec.s2 == 7){ vec &= (int4)(-1, -1, 0, -1);",
"the first element to -1, the second to 0 */ vec.s01 = vec.s23",
"= ''' __kernel void op_test(__global int4 *output) { int4 vec = (int4)(1, 2,",
"to -1, the second to 0 */ vec.s01 = vec.s23 < 7; /*",
"or equal to 7 */ while(vec.s3 > 7 && (vec.s0 < 16 ||",
"< 7; /* Divides the last element by 2 until it is less",
"to 0 Doesn't change the other elements (-1 in hexadecimal = 0xFFFFFFFF */",
"n_globals = (1,) n_locals = None prog.op_test(queue, n_globals, n_locals, buffer_out) # Enqueue command",
"2 until it is less than or equal to 7 */ while(vec.s3 >",
"by 2 until it is less than or equal to 7 */ while(vec.s3",
"vec.s23 < 7; /* Divides the last element by 2 until it is",
"Enqueue command to copy from buffer_out to host memory cl.enqueue_copy(queue, dest=out, src=buffer_out, is_blocking=True)",
"command to copy from buffer_out to host memory cl.enqueue_copy(queue, dest=out, src=buffer_out, is_blocking=True) print('Output:",
"3, 4); /* Adds 4 to every element of vec */ vec +=",
"*/ if(vec.s2 == 7){ vec &= (int4)(-1, -1, 0, -1); } /* Sets",
"Build program in the specified context using the kernel source code prog =",
"cl.program_build_info.LOG)) raise # Create output buffer out = cl.array.vec.zeros_int4() buffer_out = cl.Buffer(context, cl.mem_flags.WRITE_ONLY,",
"import pyopencl as cl import pyopencl.array import utility kernel_src = ''' __kernel void",
"cl.Context(devices=[dev]) queue = cl.CommandQueue(context, dev) # Build program in the specified context using",
"''' import pyopencl as cl import pyopencl.array import utility kernel_src = ''' __kernel",
"Sets the third element to 0 Doesn't change the other elements (-1 in",
"from buffer_out to host memory cl.enqueue_copy(queue, dest=out, src=buffer_out, is_blocking=True) print('Output: ' + str(out))",
"import utility kernel_src = ''' __kernel void op_test(__global int4 *output) { int4 vec",
"utility.get_default_device() context = cl.Context(devices=[dev]) queue = cl.CommandQueue(context, dev) # Build program in the",
"= utility.get_default_device() context = cl.Context(devices=[dev]) queue = cl.CommandQueue(context, dev) # Build program in",
"try: prog.build(options=['-Werror'], devices=[dev]) except: print('Build log:') print(prog.get_build_info(dev, cl.program_build_info.LOG)) raise # Create output buffer",
"= (1,) n_locals = None prog.op_test(queue, n_globals, n_locals, buffer_out) # Enqueue command to",
"using the kernel source code prog = cl.Program(context, kernel_src) try: prog.build(options=['-Werror'], devices=[dev]) except:",
"specified context using the kernel source code prog = cl.Program(context, kernel_src) try: prog.build(options=['-Werror'],",
"the other elements (-1 in hexadecimal = 0xFFFFFFFF */ if(vec.s2 == 7){ vec",
"&= (int4)(-1, -1, 0, -1); } /* Sets the first element to -1,",
"until it is less than or equal to 7 */ while(vec.s3 > 7",
"dev = utility.get_default_device() context = cl.Context(devices=[dev]) queue = cl.CommandQueue(context, dev) # Build program",
"= cl.Context(devices=[dev]) queue = cl.CommandQueue(context, dev) # Build program in the specified context",
"cl.Program(context, kernel_src) try: prog.build(options=['-Werror'], devices=[dev]) except: print('Build log:') print(prog.get_build_info(dev, cl.program_build_info.LOG)) raise # Create",
"n_locals = None prog.op_test(queue, n_globals, n_locals, buffer_out) # Enqueue command to copy from",
"(vec.s0 < 16 || vec.s1 < 16)){ vec.s3 >>= 1; } *output =",
"void op_test(__global int4 *output) { int4 vec = (int4)(1, 2, 3, 4); /*",
"change the other elements (-1 in hexadecimal = 0xFFFFFFFF */ if(vec.s2 == 7){",
"n_globals, n_locals, buffer_out) # Enqueue command to copy from buffer_out to host memory",
"the second to 0 */ vec.s01 = vec.s23 < 7; /* Divides the",
"the last element by 2 until it is less than or equal to",
"= cl.Program(context, kernel_src) try: prog.build(options=['-Werror'], devices=[dev]) except: print('Build log:') print(prog.get_build_info(dev, cl.program_build_info.LOG)) raise #",
"cl.CommandQueue(context, dev) # Build program in the specified context using the kernel source",
"if(vec.s2 == 7){ vec &= (int4)(-1, -1, 0, -1); } /* Sets the",
"create command queue and program dev = utility.get_default_device() context = cl.Context(devices=[dev]) queue =",
"kernel_src) try: prog.build(options=['-Werror'], devices=[dev]) except: print('Build log:') print(prog.get_build_info(dev, cl.program_build_info.LOG)) raise # Create output",
"7){ vec &= (int4)(-1, -1, 0, -1); } /* Sets the first element",
"op_test(__global int4 *output) { int4 vec = (int4)(1, 2, 3, 4); /* Adds",
"# Get device and context, create command queue and program dev = utility.get_default_device()",
"7 && (vec.s0 < 16 || vec.s1 < 16)){ vec.s3 >>= 1; }",
"argument specified directly) n_globals = (1,) n_locals = None prog.op_test(queue, n_globals, n_locals, buffer_out)",
"n_locals, buffer_out) # Enqueue command to copy from buffer_out to host memory cl.enqueue_copy(queue,",
"vec; } ''' # Get device and context, create command queue and program",
"last element by 2 until it is less than or equal to 7",
"element of vec */ vec += 4; /* Sets the third element to",
"-1, the second to 0 */ vec.s01 = vec.s23 < 7; /* Divides",
"import pyopencl.array import utility kernel_src = ''' __kernel void op_test(__global int4 *output) {",
"and program dev = utility.get_default_device() context = cl.Context(devices=[dev]) queue = cl.CommandQueue(context, dev) #",
"dev) # Build program in the specified context using the kernel source code",
"Create output buffer out = cl.array.vec.zeros_int4() buffer_out = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize) # Enqueue",
"cl.array.vec.zeros_int4() buffer_out = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize) # Enqueue kernel (with argument specified directly)",
"every element of vec */ vec += 4; /* Sets the third element",
"Enqueue kernel (with argument specified directly) n_globals = (1,) n_locals = None prog.op_test(queue,",
"context = cl.Context(devices=[dev]) queue = cl.CommandQueue(context, dev) # Build program in the specified",
"vector usage) ''' import pyopencl as cl import pyopencl.array import utility kernel_src =",
"source code prog = cl.Program(context, kernel_src) try: prog.build(options=['-Werror'], devices=[dev]) except: print('Build log:') print(prog.get_build_info(dev,",
"buffer_out) # Enqueue command to copy from buffer_out to host memory cl.enqueue_copy(queue, dest=out,",
"Sets the first element to -1, the second to 0 */ vec.s01 =",
"vec = (int4)(1, 2, 3, 4); /* Adds 4 to every element of",
"4); /* Adds 4 to every element of vec */ vec += 4;",
"except: print('Build log:') print(prog.get_build_info(dev, cl.program_build_info.LOG)) raise # Create output buffer out = cl.array.vec.zeros_int4()",
"second to 0 */ vec.s01 = vec.s23 < 7; /* Divides the last",
"out = cl.array.vec.zeros_int4() buffer_out = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize) # Enqueue kernel (with argument",
"vec.s3 >>= 1; } *output = vec; } ''' # Get device and",
"/* Sets the third element to 0 Doesn't change the other elements (-1",
"(int4)(1, 2, 3, 4); /* Adds 4 to every element of vec */",
"context using the kernel source code prog = cl.Program(context, kernel_src) try: prog.build(options=['-Werror'], devices=[dev])",
"in the specified context using the kernel source code prog = cl.Program(context, kernel_src)",
"= None prog.op_test(queue, n_globals, n_locals, buffer_out) # Enqueue command to copy from buffer_out",
"4 to every element of vec */ vec += 4; /* Sets the",
"*output) { int4 vec = (int4)(1, 2, 3, 4); /* Adds 4 to",
"first element to -1, the second to 0 */ vec.s01 = vec.s23 <",
"Doesn't change the other elements (-1 in hexadecimal = 0xFFFFFFFF */ if(vec.s2 ==",
"cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize) # Enqueue kernel (with argument specified directly) n_globals = (1,)",
"kernel source code prog = cl.Program(context, kernel_src) try: prog.build(options=['-Werror'], devices=[dev]) except: print('Build log:')",
"5.1: Operator usage (and vector usage) ''' import pyopencl as cl import pyopencl.array",
"utility kernel_src = ''' __kernel void op_test(__global int4 *output) { int4 vec =",
"context, create command queue and program dev = utility.get_default_device() context = cl.Context(devices=[dev]) queue",
"*/ vec += 4; /* Sets the third element to 0 Doesn't change",
"element to -1, the second to 0 */ vec.s01 = vec.s23 < 7;",
"{ int4 vec = (int4)(1, 2, 3, 4); /* Adds 4 to every",
"device and context, create command queue and program dev = utility.get_default_device() context =",
"/* Adds 4 to every element of vec */ vec += 4; /*",
"0 Doesn't change the other elements (-1 in hexadecimal = 0xFFFFFFFF */ if(vec.s2",
"&& (vec.s0 < 16 || vec.s1 < 16)){ vec.s3 >>= 1; } *output",
"= cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize) # Enqueue kernel (with argument specified directly) n_globals =",
"is less than or equal to 7 */ while(vec.s3 > 7 && (vec.s0",
"# Enqueue command to copy from buffer_out to host memory cl.enqueue_copy(queue, dest=out, src=buffer_out,",
"Adds 4 to every element of vec */ vec += 4; /* Sets",
"as cl import pyopencl.array import utility kernel_src = ''' __kernel void op_test(__global int4",
"*/ vec.s01 = vec.s23 < 7; /* Divides the last element by 2"
] |
[
"fight_start() else: exit() def time_past(): global cur_time cur_time=time.time() return int(cur_time-ori_time) def wait(): global",
"def wait(): global double_water,cur_time if double_water: time.sleep(3) else: cur_time=time.time() if cur_time-ori_time>=120: double_water=True time.sleep(3)",
"cur_time=time.time() return int(cur_time-ori_time) def wait(): global double_water,cur_time if double_water: time.sleep(3) else: cur_time=time.time() if",
"cur_time cur_time=time.time() return int(cur_time-ori_time) def wait(): global double_water,cur_time if double_water: time.sleep(3) else: cur_time=time.time()",
"AUTO=False def fight_start(): global ori_time,started,double_water,pre_time ori_time=time.time() pre_time=0 started=True double_water=False def fight_end(): global started",
"double_water=False def fight_end(): global started print(\"Fight Finished.\") if AUTO: next_fight() fight_start() else: exit()",
"waves,cur_time,pre_time cur_time=time.time() if cur_time-pre_time>=30: waves+=1 # pre_time=cur_time return True else: return False def",
"def time_past(): global cur_time cur_time=time.time() return int(cur_time-ori_time) def wait(): global double_water,cur_time if double_water:",
"global started print(\"Fight Finished.\") if AUTO: next_fight() fight_start() else: exit() def time_past(): global",
"cur_time=time.time() if cur_time-pre_time>=30: waves+=1 # pre_time=cur_time return True else: return False def set_pre_time():",
"double_water=False AUTO=False def fight_start(): global ori_time,started,double_water,pre_time ori_time=time.time() pre_time=0 started=True double_water=False def fight_end(): global",
"waves=int() double_water=False AUTO=False def fight_start(): global ori_time,started,double_water,pre_time ori_time=time.time() pre_time=0 started=True double_water=False def fight_end():",
"cur_time=int() pre_time=int() waves=int() double_water=False AUTO=False def fight_start(): global ori_time,started,double_water,pre_time ori_time=time.time() pre_time=0 started=True double_water=False",
"surgeon import * ori_time=int() cur_time=int() pre_time=int() waves=int() double_water=False AUTO=False def fight_start(): global ori_time,started,double_water,pre_time",
"def fight_start(): global ori_time,started,double_water,pre_time ori_time=time.time() pre_time=0 started=True double_water=False def fight_end(): global started print(\"Fight",
"ori_time=int() cur_time=int() pre_time=int() waves=int() double_water=False AUTO=False def fight_start(): global ori_time,started,double_water,pre_time ori_time=time.time() pre_time=0 started=True",
"def fight_end(): global started print(\"Fight Finished.\") if AUTO: next_fight() fight_start() else: exit() def",
"exit() def time_past(): global cur_time cur_time=time.time() return int(cur_time-ori_time) def wait(): global double_water,cur_time if",
"if cur_time-ori_time>=120: double_water=True time.sleep(3) else: time.sleep(5) def Fight(): global waves,cur_time,pre_time cur_time=time.time() if cur_time-pre_time>=30:",
"double_water,cur_time if double_water: time.sleep(3) else: cur_time=time.time() if cur_time-ori_time>=120: double_water=True time.sleep(3) else: time.sleep(5) def",
"Fight(): global waves,cur_time,pre_time cur_time=time.time() if cur_time-pre_time>=30: waves+=1 # pre_time=cur_time return True else: return",
"started=True double_water=False def fight_end(): global started print(\"Fight Finished.\") if AUTO: next_fight() fight_start() else:",
"waves+=1 # pre_time=cur_time return True else: return False def set_pre_time(): global pre_time pre_time=time.time()",
"fight_start(): global ori_time,started,double_water,pre_time ori_time=time.time() pre_time=0 started=True double_water=False def fight_end(): global started print(\"Fight Finished.\")",
"import * ori_time=int() cur_time=int() pre_time=int() waves=int() double_water=False AUTO=False def fight_start(): global ori_time,started,double_water,pre_time ori_time=time.time()",
"global waves,cur_time,pre_time cur_time=time.time() if cur_time-pre_time>=30: waves+=1 # pre_time=cur_time return True else: return False",
"ori_time,started,double_water,pre_time ori_time=time.time() pre_time=0 started=True double_water=False def fight_end(): global started print(\"Fight Finished.\") if AUTO:",
"if AUTO: next_fight() fight_start() else: exit() def time_past(): global cur_time cur_time=time.time() return int(cur_time-ori_time)",
"# pre_time=cur_time return True else: return False def set_pre_time(): global pre_time pre_time=time.time() if",
"global cur_time cur_time=time.time() return int(cur_time-ori_time) def wait(): global double_water,cur_time if double_water: time.sleep(3) else:",
"cur_time=time.time() if cur_time-ori_time>=120: double_water=True time.sleep(3) else: time.sleep(5) def Fight(): global waves,cur_time,pre_time cur_time=time.time() if",
"True else: return False def set_pre_time(): global pre_time pre_time=time.time() if __name__=='__main__': print(\"Regulator Here\")",
"time.sleep(3) else: time.sleep(5) def Fight(): global waves,cur_time,pre_time cur_time=time.time() if cur_time-pre_time>=30: waves+=1 # pre_time=cur_time",
"Finished.\") if AUTO: next_fight() fight_start() else: exit() def time_past(): global cur_time cur_time=time.time() return",
"time_past(): global cur_time cur_time=time.time() return int(cur_time-ori_time) def wait(): global double_water,cur_time if double_water: time.sleep(3)",
"int(cur_time-ori_time) def wait(): global double_water,cur_time if double_water: time.sleep(3) else: cur_time=time.time() if cur_time-ori_time>=120: double_water=True",
"if cur_time-pre_time>=30: waves+=1 # pre_time=cur_time return True else: return False def set_pre_time(): global",
"pre_time=cur_time return True else: return False def set_pre_time(): global pre_time pre_time=time.time() if __name__=='__main__':",
"next_fight() fight_start() else: exit() def time_past(): global cur_time cur_time=time.time() return int(cur_time-ori_time) def wait():",
"print(\"Fight Finished.\") if AUTO: next_fight() fight_start() else: exit() def time_past(): global cur_time cur_time=time.time()",
"else: cur_time=time.time() if cur_time-ori_time>=120: double_water=True time.sleep(3) else: time.sleep(5) def Fight(): global waves,cur_time,pre_time cur_time=time.time()",
"cur_time-ori_time>=120: double_water=True time.sleep(3) else: time.sleep(5) def Fight(): global waves,cur_time,pre_time cur_time=time.time() if cur_time-pre_time>=30: waves+=1",
"started print(\"Fight Finished.\") if AUTO: next_fight() fight_start() else: exit() def time_past(): global cur_time",
"pre_time=int() waves=int() double_water=False AUTO=False def fight_start(): global ori_time,started,double_water,pre_time ori_time=time.time() pre_time=0 started=True double_water=False def",
"wait(): global double_water,cur_time if double_water: time.sleep(3) else: cur_time=time.time() if cur_time-ori_time>=120: double_water=True time.sleep(3) else:",
"else: time.sleep(5) def Fight(): global waves,cur_time,pre_time cur_time=time.time() if cur_time-pre_time>=30: waves+=1 # pre_time=cur_time return",
"pre_time=0 started=True double_water=False def fight_end(): global started print(\"Fight Finished.\") if AUTO: next_fight() fight_start()",
"return True else: return False def set_pre_time(): global pre_time pre_time=time.time() if __name__=='__main__': print(\"Regulator",
"fight_end(): global started print(\"Fight Finished.\") if AUTO: next_fight() fight_start() else: exit() def time_past():",
"from surgeon import * ori_time=int() cur_time=int() pre_time=int() waves=int() double_water=False AUTO=False def fight_start(): global",
"time.sleep(3) else: cur_time=time.time() if cur_time-ori_time>=120: double_water=True time.sleep(3) else: time.sleep(5) def Fight(): global waves,cur_time,pre_time",
"time.sleep(5) def Fight(): global waves,cur_time,pre_time cur_time=time.time() if cur_time-pre_time>=30: waves+=1 # pre_time=cur_time return True",
"global double_water,cur_time if double_water: time.sleep(3) else: cur_time=time.time() if cur_time-ori_time>=120: double_water=True time.sleep(3) else: time.sleep(5)",
"if double_water: time.sleep(3) else: cur_time=time.time() if cur_time-ori_time>=120: double_water=True time.sleep(3) else: time.sleep(5) def Fight():",
"double_water: time.sleep(3) else: cur_time=time.time() if cur_time-ori_time>=120: double_water=True time.sleep(3) else: time.sleep(5) def Fight(): global",
"* ori_time=int() cur_time=int() pre_time=int() waves=int() double_water=False AUTO=False def fight_start(): global ori_time,started,double_water,pre_time ori_time=time.time() pre_time=0",
"return int(cur_time-ori_time) def wait(): global double_water,cur_time if double_water: time.sleep(3) else: cur_time=time.time() if cur_time-ori_time>=120:",
"import time from surgeon import * ori_time=int() cur_time=int() pre_time=int() waves=int() double_water=False AUTO=False def",
"ori_time=time.time() pre_time=0 started=True double_water=False def fight_end(): global started print(\"Fight Finished.\") if AUTO: next_fight()",
"time from surgeon import * ori_time=int() cur_time=int() pre_time=int() waves=int() double_water=False AUTO=False def fight_start():",
"AUTO: next_fight() fight_start() else: exit() def time_past(): global cur_time cur_time=time.time() return int(cur_time-ori_time) def",
"def Fight(): global waves,cur_time,pre_time cur_time=time.time() if cur_time-pre_time>=30: waves+=1 # pre_time=cur_time return True else:",
"cur_time-pre_time>=30: waves+=1 # pre_time=cur_time return True else: return False def set_pre_time(): global pre_time",
"global ori_time,started,double_water,pre_time ori_time=time.time() pre_time=0 started=True double_water=False def fight_end(): global started print(\"Fight Finished.\") if",
"else: exit() def time_past(): global cur_time cur_time=time.time() return int(cur_time-ori_time) def wait(): global double_water,cur_time",
"double_water=True time.sleep(3) else: time.sleep(5) def Fight(): global waves,cur_time,pre_time cur_time=time.time() if cur_time-pre_time>=30: waves+=1 #"
] |
[
"for velocity_x in range(min_vx, max_vx+1): min_vx = math.floor((1 + math.sqrt(1 + target.left *",
"self.right) and (self.bottom <= y <= self.top) @staticmethod def from_input(string): match = re.search(r\"target",
"2) max_vx = target.right for velocity_y in range(min_vy, max_vy+1): hit, maxy = hit_target(velocity_x,",
"probe_y) velocity_x -= sign(velocity_x) velocity_y -= 1 _t += 1 if target.inside(probe_x, probe_y):",
"puzzle = Rect.from_input(\"target area: x=209..238, y=-86..-59\") example = Rect.from_input(\"target area: x=20..30, y=-10..-5\") def",
"top): self.left = left self.right = right self.bottom = bottom self.top = top",
"import math from typing import Tuple class Rect: \"\"\"A 2D rectangle defined by",
"bottom-right positions\"\"\" def __init__(self, left, right, bottom, top): self.left = left self.right =",
"hit: global_maxima = max(global_maxima, maxy) hit_count += 1 print(f\"What is the highest y",
"smart brute-force over sensible ranges min_vx = 0 max_vx = target.right # max",
"self.left = left self.right = right self.bottom = bottom self.top = top def",
"assert False # Shouldn't reach return None def sign(_n): if _n > 0:",
"for maxvy max_vy = -min_vy # not much thinkin here (explore the same",
"return -1 return 0 def hit_target(vx0, vy0, target:Rect) -> Tuple[bool, int]: \"\"\"Simulate the",
"re.search(r\"target area: x=(-?\\d*)..(-?\\d*), y=(-?\\d*)..(-?\\d*)\", string) if match: left = int(match.group(1)) right = int(match.group(2))",
"bottom, top): self.left = left self.right = right self.bottom = bottom self.top =",
"the target area. Returns wether probe reaches the target area in a discrete",
"<= self.right) and (self.bottom <= y <= self.top) @staticmethod def from_input(string): match =",
"if the probe reaches the target area. Returns wether probe reaches the target",
"def from_input(string): match = re.search(r\"target area: x=(-?\\d*)..(-?\\d*), y=(-?\\d*)..(-?\\d*)\", string) if match: left =",
"= vx0 velocity_y = vy0 probe_x = 0 probe_y = 0 _t =",
"= 0 while probe_x < target.right and probe_y > target.bottom: probe_x += velocity_x",
"= Rect.from_input(\"target area: x=20..30, y=-10..-5\") def both_parts_bruteforce(target): global_maxima = 0 hit_count = 0",
"0: return 1 if _n < 0: return -1 return 0 def hit_target(vx0,",
"class Rect: \"\"\"A 2D rectangle defined by top-left and bottom-right positions\"\"\" def __init__(self,",
"+ math.sqrt(1 + target.left * 8)) / 2) max_vx = target.right for velocity_y",
"math from typing import Tuple class Rect: \"\"\"A 2D rectangle defined by top-left",
"max_vy+1): hit, maxy = hit_target(velocity_x, velocity_y, target) if hit: global_maxima = max(global_maxima, maxy)",
"match: left = int(match.group(1)) right = int(match.group(2)) bottom = int(match.group(3)) top = int(match.group(4))",
"< target.right and probe_y > target.bottom: probe_x += velocity_x probe_y += velocity_y max_height",
"on the trajectory.\"\"\" velocity_x = vx0 velocity_y = vy0 probe_x = 0 probe_y",
"probe shooting and check if the probe reaches the target area. Returns wether",
"{global_maxima}\") print(f\"How many distinct initial velocity values cause the probe to be within",
"self.bottom = bottom self.top = top def inside(self, x, y): \"\"\"Checks if a",
"= max(max_height, probe_y) velocity_x -= sign(velocity_x) velocity_y -= 1 _t += 1 if",
"0 probe_y = 0 _t = 0 max_height = 0 while probe_x <",
"area. Returns wether probe reaches the target area in a discrete t and,",
"1 if target.inside(probe_x, probe_y): return True, max_height return False, 0 puzzle = Rect.from_input(\"target",
"8)) / 2) max_vx = target.right for velocity_y in range(min_vy, max_vy+1): hit, maxy",
"x, y): \"\"\"Checks if a given x, y point is inside the rect\"\"\"",
"Rect(left, right, bottom, top) assert False # Shouldn't reach return None def sign(_n):",
"is inside the rect\"\"\" return (self.left <= x <= self.right) and (self.bottom <=",
"= int(match.group(2)) bottom = int(match.group(3)) top = int(match.group(4)) return Rect(left, right, bottom, top)",
"velocity_x -= sign(velocity_x) velocity_y -= 1 _t += 1 if target.inside(probe_x, probe_y): return",
"if match: left = int(match.group(1)) right = int(match.group(2)) bottom = int(match.group(3)) top =",
"right, bottom, top): self.left = left self.right = right self.bottom = bottom self.top",
"the probe reaches the target area. Returns wether probe reaches the target area",
"maxy) hit_count += 1 print(f\"What is the highest y position it reaches on",
"range in positive than in negative) for velocity_x in range(min_vx, max_vx+1): min_vx =",
"True, max_height return False, 0 puzzle = Rect.from_input(\"target area: x=209..238, y=-86..-59\") example =",
"in range(min_vy, max_vy+1): hit, maxy = hit_target(velocity_x, velocity_y, target) if hit: global_maxima =",
"0 max_vx = target.right # max speed is hitting the right of the",
"wether probe reaches the target area in a discrete t and, in that",
"self.right = right self.bottom = bottom self.top = top def inside(self, x, y):",
"and, in that case, the maximum height it reaches on the trajectory.\"\"\" velocity_x",
"= 0 max_height = 0 while probe_x < target.right and probe_y > target.bottom:",
"= int(match.group(1)) right = int(match.group(2)) bottom = int(match.group(3)) top = int(match.group(4)) return Rect(left,",
"math.sqrt(1 + target.left * 8)) / 2) max_vx = target.right for velocity_y in",
"= 0 # do a smart brute-force over sensible ranges min_vx = 0",
"probe_y += velocity_y max_height = max(max_height, probe_y) velocity_x -= sign(velocity_x) velocity_y -= 1",
"= math.floor((1 + math.sqrt(1 + target.left * 8)) / 2) max_vx = target.right",
"\"\"\"Checks if a given x, y point is inside the rect\"\"\" return (self.left",
"height it reaches on the trajectory.\"\"\" velocity_x = vx0 velocity_y = vy0 probe_x",
"the maximum height it reaches on the trajectory.\"\"\" velocity_x = vx0 velocity_y =",
"max_vx = target.right # max speed is hitting the right of the area",
"probe_x += velocity_x probe_y += velocity_y max_height = max(max_height, probe_y) velocity_x -= sign(velocity_x)",
"> 0: return 1 if _n < 0: return -1 return 0 def",
"reaches the target area in a discrete t and, in that case, the",
"target) if hit: global_maxima = max(global_maxima, maxy) hit_count += 1 print(f\"What is the",
"probe_x < target.right and probe_y > target.bottom: probe_x += velocity_x probe_y += velocity_y",
"Tuple class Rect: \"\"\"A 2D rectangle defined by top-left and bottom-right positions\"\"\" def",
"= 0 hit_count = 0 # do a smart brute-force over sensible ranges",
"probe_x = 0 probe_y = 0 _t = 0 max_height = 0 while",
"_t = 0 max_height = 0 while probe_x < target.right and probe_y >",
"= target.right for velocity_y in range(min_vy, max_vy+1): hit, maxy = hit_target(velocity_x, velocity_y, target)",
"= int(match.group(4)) return Rect(left, right, bottom, top) assert False # Shouldn't reach return",
"in negative) for velocity_x in range(min_vx, max_vx+1): min_vx = math.floor((1 + math.sqrt(1 +",
"of the area in t=1 min_vy = min(target.bottom, target.top) # use the same",
"the target area in a discrete t and, in that case, the maximum",
"vy0 probe_x = 0 probe_y = 0 _t = 0 max_height = 0",
"y): \"\"\"Checks if a given x, y point is inside the rect\"\"\" return",
"self.top = top def inside(self, x, y): \"\"\"Checks if a given x, y",
"_n < 0: return -1 return 0 def hit_target(vx0, vy0, target:Rect) -> Tuple[bool,",
"ranges min_vx = 0 max_vx = target.right # max speed is hitting the",
"Rect: \"\"\"A 2D rectangle defined by top-left and bottom-right positions\"\"\" def __init__(self, left,",
"distinct initial velocity values cause the probe to be within the target area",
"this trajectory? {global_maxima}\") print(f\"How many distinct initial velocity values cause the probe to",
"over sensible ranges min_vx = 0 max_vx = target.right # max speed is",
"positive than in negative) for velocity_x in range(min_vx, max_vx+1): min_vx = math.floor((1 +",
"right, bottom, top) assert False # Shouldn't reach return None def sign(_n): if",
"return True, max_height return False, 0 puzzle = Rect.from_input(\"target area: x=209..238, y=-86..-59\") example",
"False, 0 puzzle = Rect.from_input(\"target area: x=209..238, y=-86..-59\") example = Rect.from_input(\"target area: x=20..30,",
"<= x <= self.right) and (self.bottom <= y <= self.top) @staticmethod def from_input(string):",
"target.right for velocity_y in range(min_vy, max_vy+1): hit, maxy = hit_target(velocity_x, velocity_y, target) if",
"top def inside(self, x, y): \"\"\"Checks if a given x, y point is",
"maxy = hit_target(velocity_x, velocity_y, target) if hit: global_maxima = max(global_maxima, maxy) hit_count +=",
"(self.bottom <= y <= self.top) @staticmethod def from_input(string): match = re.search(r\"target area: x=(-?\\d*)..(-?\\d*),",
"the same range in positive than in negative) for velocity_x in range(min_vx, max_vx+1):",
"velocity_y max_height = max(max_height, probe_y) velocity_x -= sign(velocity_x) velocity_y -= 1 _t +=",
"the right of the area in t=1 min_vy = min(target.bottom, target.top) # use",
"right self.bottom = bottom self.top = top def inside(self, x, y): \"\"\"Checks if",
"max_height = 0 while probe_x < target.right and probe_y > target.bottom: probe_x +=",
"-= 1 _t += 1 if target.inside(probe_x, probe_y): return True, max_height return False,",
"= -min_vy # not much thinkin here (explore the same range in positive",
"probe reaches the target area in a discrete t and, in that case,",
"string) if match: left = int(match.group(1)) right = int(match.group(2)) bottom = int(match.group(3)) top",
"0 while probe_x < target.right and probe_y > target.bottom: probe_x += velocity_x probe_y",
"y <= self.top) @staticmethod def from_input(string): match = re.search(r\"target area: x=(-?\\d*)..(-?\\d*), y=(-?\\d*)..(-?\\d*)\", string)",
"both_parts_bruteforce(target): global_maxima = 0 hit_count = 0 # do a smart brute-force over",
"bottom, top) assert False # Shouldn't reach return None def sign(_n): if _n",
"+= 1 print(f\"What is the highest y position it reaches on this trajectory?",
"min_vy = min(target.bottom, target.top) # use the same reasoning as for maxvy max_vy",
"range(min_vx, max_vx+1): min_vx = math.floor((1 + math.sqrt(1 + target.left * 8)) / 2)",
"the same reasoning as for maxvy max_vy = -min_vy # not much thinkin",
"x <= self.right) and (self.bottom <= y <= self.top) @staticmethod def from_input(string): match",
"int(match.group(1)) right = int(match.group(2)) bottom = int(match.group(3)) top = int(match.group(4)) return Rect(left, right,",
"if hit: global_maxima = max(global_maxima, maxy) hit_count += 1 print(f\"What is the highest",
"velocity_x in range(min_vx, max_vx+1): min_vx = math.floor((1 + math.sqrt(1 + target.left * 8))",
"if target.inside(probe_x, probe_y): return True, max_height return False, 0 puzzle = Rect.from_input(\"target area:",
"= 0 _t = 0 max_height = 0 while probe_x < target.right and",
"in range(min_vx, max_vx+1): min_vx = math.floor((1 + math.sqrt(1 + target.left * 8)) /",
"@staticmethod def from_input(string): match = re.search(r\"target area: x=(-?\\d*)..(-?\\d*), y=(-?\\d*)..(-?\\d*)\", string) if match: left",
"int(match.group(2)) bottom = int(match.group(3)) top = int(match.group(4)) return Rect(left, right, bottom, top) assert",
"sign(_n): if _n > 0: return 1 if _n < 0: return -1",
"-= sign(velocity_x) velocity_y -= 1 _t += 1 if target.inside(probe_x, probe_y): return True,",
"= hit_target(velocity_x, velocity_y, target) if hit: global_maxima = max(global_maxima, maxy) hit_count += 1",
"int]: \"\"\"Simulate the probe shooting and check if the probe reaches the target",
"discrete t and, in that case, the maximum height it reaches on the",
"in t=1 min_vy = min(target.bottom, target.top) # use the same reasoning as for",
"match = re.search(r\"target area: x=(-?\\d*)..(-?\\d*), y=(-?\\d*)..(-?\\d*)\", string) if match: left = int(match.group(1)) right",
"_t += 1 if target.inside(probe_x, probe_y): return True, max_height return False, 0 puzzle",
"min_vx = 0 max_vx = target.right # max speed is hitting the right",
"in positive than in negative) for velocity_x in range(min_vx, max_vx+1): min_vx = math.floor((1",
"the rect\"\"\" return (self.left <= x <= self.right) and (self.bottom <= y <=",
"def inside(self, x, y): \"\"\"Checks if a given x, y point is inside",
"in that case, the maximum height it reaches on the trajectory.\"\"\" velocity_x =",
"than in negative) for velocity_x in range(min_vx, max_vx+1): min_vx = math.floor((1 + math.sqrt(1",
"it reaches on the trajectory.\"\"\" velocity_x = vx0 velocity_y = vy0 probe_x =",
"0 max_height = 0 while probe_x < target.right and probe_y > target.bottom: probe_x",
"+= velocity_x probe_y += velocity_y max_height = max(max_height, probe_y) velocity_x -= sign(velocity_x) velocity_y",
"probe_y > target.bottom: probe_x += velocity_x probe_y += velocity_y max_height = max(max_height, probe_y)",
"print(f\"What is the highest y position it reaches on this trajectory? {global_maxima}\") print(f\"How",
"hit_target(vx0, vy0, target:Rect) -> Tuple[bool, int]: \"\"\"Simulate the probe shooting and check if",
"max_height = max(max_height, probe_y) velocity_x -= sign(velocity_x) velocity_y -= 1 _t += 1",
"example = Rect.from_input(\"target area: x=20..30, y=-10..-5\") def both_parts_bruteforce(target): global_maxima = 0 hit_count =",
"# Shouldn't reach return None def sign(_n): if _n > 0: return 1",
"= Rect.from_input(\"target area: x=209..238, y=-86..-59\") example = Rect.from_input(\"target area: x=20..30, y=-10..-5\") def both_parts_bruteforce(target):",
"a given x, y point is inside the rect\"\"\" return (self.left <= x",
"trajectory.\"\"\" velocity_x = vx0 velocity_y = vy0 probe_x = 0 probe_y = 0",
"in a discrete t and, in that case, the maximum height it reaches",
"= vy0 probe_x = 0 probe_y = 0 _t = 0 max_height =",
"sign(velocity_x) velocity_y -= 1 _t += 1 if target.inside(probe_x, probe_y): return True, max_height",
"use the same reasoning as for maxvy max_vy = -min_vy # not much",
"from typing import Tuple class Rect: \"\"\"A 2D rectangle defined by top-left and",
"# not much thinkin here (explore the same range in positive than in",
"target:Rect) -> Tuple[bool, int]: \"\"\"Simulate the probe shooting and check if the probe",
"* 8)) / 2) max_vx = target.right for velocity_y in range(min_vy, max_vy+1): hit,",
"max speed is hitting the right of the area in t=1 min_vy =",
"return (self.left <= x <= self.right) and (self.bottom <= y <= self.top) @staticmethod",
"is hitting the right of the area in t=1 min_vy = min(target.bottom, target.top)",
"0 _t = 0 max_height = 0 while probe_x < target.right and probe_y",
"velocity_y = vy0 probe_x = 0 probe_y = 0 _t = 0 max_height",
"positions\"\"\" def __init__(self, left, right, bottom, top): self.left = left self.right = right",
"re import math from typing import Tuple class Rect: \"\"\"A 2D rectangle defined",
"and probe_y > target.bottom: probe_x += velocity_x probe_y += velocity_y max_height = max(max_height,",
"__init__(self, left, right, bottom, top): self.left = left self.right = right self.bottom =",
"+= velocity_y max_height = max(max_height, probe_y) velocity_x -= sign(velocity_x) velocity_y -= 1 _t",
"velocity_y -= 1 _t += 1 if target.inside(probe_x, probe_y): return True, max_height return",
"the highest y position it reaches on this trajectory? {global_maxima}\") print(f\"How many distinct",
"thinkin here (explore the same range in positive than in negative) for velocity_x",
"y position it reaches on this trajectory? {global_maxima}\") print(f\"How many distinct initial velocity",
"< 0: return -1 return 0 def hit_target(vx0, vy0, target:Rect) -> Tuple[bool, int]:",
"left self.right = right self.bottom = bottom self.top = top def inside(self, x,",
"much thinkin here (explore the same range in positive than in negative) for",
"Rect.from_input(\"target area: x=209..238, y=-86..-59\") example = Rect.from_input(\"target area: x=20..30, y=-10..-5\") def both_parts_bruteforce(target): global_maxima",
"(self.left <= x <= self.right) and (self.bottom <= y <= self.top) @staticmethod def",
"= 0 probe_y = 0 _t = 0 max_height = 0 while probe_x",
"velocity_y, target) if hit: global_maxima = max(global_maxima, maxy) hit_count += 1 print(f\"What is",
"= min(target.bottom, target.top) # use the same reasoning as for maxvy max_vy =",
"probe_y = 0 _t = 0 max_height = 0 while probe_x < target.right",
"max_vy = -min_vy # not much thinkin here (explore the same range in",
"target.top) # use the same reasoning as for maxvy max_vy = -min_vy #",
"velocity_x probe_y += velocity_y max_height = max(max_height, probe_y) velocity_x -= sign(velocity_x) velocity_y -=",
"if _n > 0: return 1 if _n < 0: return -1 return",
"vx0 velocity_y = vy0 probe_x = 0 probe_y = 0 _t = 0",
"int(match.group(4)) return Rect(left, right, bottom, top) assert False # Shouldn't reach return None",
"= bottom self.top = top def inside(self, x, y): \"\"\"Checks if a given",
"/ 2) max_vx = target.right for velocity_y in range(min_vy, max_vy+1): hit, maxy =",
"= left self.right = right self.bottom = bottom self.top = top def inside(self,",
"\"\"\" import re import math from typing import Tuple class Rect: \"\"\"A 2D",
"inside(self, x, y): \"\"\"Checks if a given x, y point is inside the",
"area: x=(-?\\d*)..(-?\\d*), y=(-?\\d*)..(-?\\d*)\", string) if match: left = int(match.group(1)) right = int(match.group(2)) bottom",
"trajectory? {global_maxima}\") print(f\"How many distinct initial velocity values cause the probe to be",
"probe reaches the target area. Returns wether probe reaches the target area in",
"return Rect(left, right, bottom, top) assert False # Shouldn't reach return None def",
"from_input(string): match = re.search(r\"target area: x=(-?\\d*)..(-?\\d*), y=(-?\\d*)..(-?\\d*)\", string) if match: left = int(match.group(1))",
"= target.right # max speed is hitting the right of the area in",
"maxvy max_vy = -min_vy # not much thinkin here (explore the same range",
"case, the maximum height it reaches on the trajectory.\"\"\" velocity_x = vx0 velocity_y",
"rect\"\"\" return (self.left <= x <= self.right) and (self.bottom <= y <= self.top)",
"and bottom-right positions\"\"\" def __init__(self, left, right, bottom, top): self.left = left self.right",
"int(match.group(3)) top = int(match.group(4)) return Rect(left, right, bottom, top) assert False # Shouldn't",
"import Tuple class Rect: \"\"\"A 2D rectangle defined by top-left and bottom-right positions\"\"\"",
"math.floor((1 + math.sqrt(1 + target.left * 8)) / 2) max_vx = target.right for",
"range(min_vy, max_vy+1): hit, maxy = hit_target(velocity_x, velocity_y, target) if hit: global_maxima = max(global_maxima,",
"target area in a discrete t and, in that case, the maximum height",
"as for maxvy max_vy = -min_vy # not much thinkin here (explore the",
"0 # do a smart brute-force over sensible ranges min_vx = 0 max_vx",
"hitting the right of the area in t=1 min_vy = min(target.bottom, target.top) #",
"many distinct initial velocity values cause the probe to be within the target",
"if _n < 0: return -1 return 0 def hit_target(vx0, vy0, target:Rect) ->",
"bottom self.top = top def inside(self, x, y): \"\"\"Checks if a given x,",
"-1 return 0 def hit_target(vx0, vy0, target:Rect) -> Tuple[bool, int]: \"\"\"Simulate the probe",
"reach return None def sign(_n): if _n > 0: return 1 if _n",
"a smart brute-force over sensible ranges min_vx = 0 max_vx = target.right #",
"return 0 def hit_target(vx0, vy0, target:Rect) -> Tuple[bool, int]: \"\"\"Simulate the probe shooting",
"-> Tuple[bool, int]: \"\"\"Simulate the probe shooting and check if the probe reaches",
"right = int(match.group(2)) bottom = int(match.group(3)) top = int(match.group(4)) return Rect(left, right, bottom,",
"y point is inside the rect\"\"\" return (self.left <= x <= self.right) and",
"x=209..238, y=-86..-59\") example = Rect.from_input(\"target area: x=20..30, y=-10..-5\") def both_parts_bruteforce(target): global_maxima = 0",
"shooting and check if the probe reaches the target area. Returns wether probe",
"left = int(match.group(1)) right = int(match.group(2)) bottom = int(match.group(3)) top = int(match.group(4)) return",
"<= self.top) @staticmethod def from_input(string): match = re.search(r\"target area: x=(-?\\d*)..(-?\\d*), y=(-?\\d*)..(-?\\d*)\", string) if",
"\"\"\" https://adventofcode.com/2021/day/17 \"\"\" import re import math from typing import Tuple class Rect:",
"if a given x, y point is inside the rect\"\"\" return (self.left <=",
"top = int(match.group(4)) return Rect(left, right, bottom, top) assert False # Shouldn't reach",
"0 hit_count = 0 # do a smart brute-force over sensible ranges min_vx",
"t and, in that case, the maximum height it reaches on the trajectory.\"\"\"",
"-min_vy # not much thinkin here (explore the same range in positive than",
"= right self.bottom = bottom self.top = top def inside(self, x, y): \"\"\"Checks",
"y=(-?\\d*)..(-?\\d*)\", string) if match: left = int(match.group(1)) right = int(match.group(2)) bottom = int(match.group(3))",
"max(max_height, probe_y) velocity_x -= sign(velocity_x) velocity_y -= 1 _t += 1 if target.inside(probe_x,",
"same range in positive than in negative) for velocity_x in range(min_vx, max_vx+1): min_vx",
"maximum height it reaches on the trajectory.\"\"\" velocity_x = vx0 velocity_y = vy0",
"check if the probe reaches the target area. Returns wether probe reaches the",
"0 puzzle = Rect.from_input(\"target area: x=209..238, y=-86..-59\") example = Rect.from_input(\"target area: x=20..30, y=-10..-5\")",
"hit_count = 0 # do a smart brute-force over sensible ranges min_vx =",
"= max(global_maxima, maxy) hit_count += 1 print(f\"What is the highest y position it",
"Rect.from_input(\"target area: x=20..30, y=-10..-5\") def both_parts_bruteforce(target): global_maxima = 0 hit_count = 0 #",
"2D rectangle defined by top-left and bottom-right positions\"\"\" def __init__(self, left, right, bottom,",
"and check if the probe reaches the target area. Returns wether probe reaches",
"= int(match.group(3)) top = int(match.group(4)) return Rect(left, right, bottom, top) assert False #",
"area in a discrete t and, in that case, the maximum height it",
"+ target.left * 8)) / 2) max_vx = target.right for velocity_y in range(min_vy,",
"0 def hit_target(vx0, vy0, target:Rect) -> Tuple[bool, int]: \"\"\"Simulate the probe shooting and",
"reaches on this trajectory? {global_maxima}\") print(f\"How many distinct initial velocity values cause the",
"that case, the maximum height it reaches on the trajectory.\"\"\" velocity_x = vx0",
"y=-10..-5\") def both_parts_bruteforce(target): global_maxima = 0 hit_count = 0 # do a smart",
"vy0, target:Rect) -> Tuple[bool, int]: \"\"\"Simulate the probe shooting and check if the",
"target.right and probe_y > target.bottom: probe_x += velocity_x probe_y += velocity_y max_height =",
"# max speed is hitting the right of the area in t=1 min_vy",
"initial velocity values cause the probe to be within the target area after",
"min_vx = math.floor((1 + math.sqrt(1 + target.left * 8)) / 2) max_vx =",
"while probe_x < target.right and probe_y > target.bottom: probe_x += velocity_x probe_y +=",
"return False, 0 puzzle = Rect.from_input(\"target area: x=209..238, y=-86..-59\") example = Rect.from_input(\"target area:",
"it reaches on this trajectory? {global_maxima}\") print(f\"How many distinct initial velocity values cause",
"# do a smart brute-force over sensible ranges min_vx = 0 max_vx =",
"(explore the same range in positive than in negative) for velocity_x in range(min_vx,",
"reaches on the trajectory.\"\"\" velocity_x = vx0 velocity_y = vy0 probe_x = 0",
"def hit_target(vx0, vy0, target:Rect) -> Tuple[bool, int]: \"\"\"Simulate the probe shooting and check",
"a discrete t and, in that case, the maximum height it reaches on",
"is the highest y position it reaches on this trajectory? {global_maxima}\") print(f\"How many",
"area: x=20..30, y=-10..-5\") def both_parts_bruteforce(target): global_maxima = 0 hit_count = 0 # do",
"hit, maxy = hit_target(velocity_x, velocity_y, target) if hit: global_maxima = max(global_maxima, maxy) hit_count",
"hit_count += 1 print(f\"What is the highest y position it reaches on this",
"brute-force over sensible ranges min_vx = 0 max_vx = target.right # max speed",
"top) assert False # Shouldn't reach return None def sign(_n): if _n >",
"sensible ranges min_vx = 0 max_vx = target.right # max speed is hitting",
"target.inside(probe_x, probe_y): return True, max_height return False, 0 puzzle = Rect.from_input(\"target area: x=209..238,",
"> target.bottom: probe_x += velocity_x probe_y += velocity_y max_height = max(max_height, probe_y) velocity_x",
"Tuple[bool, int]: \"\"\"Simulate the probe shooting and check if the probe reaches the",
"Returns wether probe reaches the target area in a discrete t and, in",
"target area. Returns wether probe reaches the target area in a discrete t",
"and (self.bottom <= y <= self.top) @staticmethod def from_input(string): match = re.search(r\"target area:",
"x, y point is inside the rect\"\"\" return (self.left <= x <= self.right)",
"the probe to be within the target area after any step?: {hit_count}\") both_parts_bruteforce(example)",
"position it reaches on this trajectory? {global_maxima}\") print(f\"How many distinct initial velocity values",
"left, right, bottom, top): self.left = left self.right = right self.bottom = bottom",
"here (explore the same range in positive than in negative) for velocity_x in",
"https://adventofcode.com/2021/day/17 \"\"\" import re import math from typing import Tuple class Rect: \"\"\"A",
"the probe shooting and check if the probe reaches the target area. Returns",
"hit_target(velocity_x, velocity_y, target) if hit: global_maxima = max(global_maxima, maxy) hit_count += 1 print(f\"What",
"self.top) @staticmethod def from_input(string): match = re.search(r\"target area: x=(-?\\d*)..(-?\\d*), y=(-?\\d*)..(-?\\d*)\", string) if match:",
"velocity_x = vx0 velocity_y = vy0 probe_x = 0 probe_y = 0 _t",
"y=-86..-59\") example = Rect.from_input(\"target area: x=20..30, y=-10..-5\") def both_parts_bruteforce(target): global_maxima = 0 hit_count",
"the trajectory.\"\"\" velocity_x = vx0 velocity_y = vy0 probe_x = 0 probe_y =",
"return None def sign(_n): if _n > 0: return 1 if _n <",
"area in t=1 min_vy = min(target.bottom, target.top) # use the same reasoning as",
"def both_parts_bruteforce(target): global_maxima = 0 hit_count = 0 # do a smart brute-force",
"_n > 0: return 1 if _n < 0: return -1 return 0",
"given x, y point is inside the rect\"\"\" return (self.left <= x <=",
"not much thinkin here (explore the same range in positive than in negative)",
"same reasoning as for maxvy max_vy = -min_vy # not much thinkin here",
"1 if _n < 0: return -1 return 0 def hit_target(vx0, vy0, target:Rect)",
"max_vx+1): min_vx = math.floor((1 + math.sqrt(1 + target.left * 8)) / 2) max_vx",
"the area in t=1 min_vy = min(target.bottom, target.top) # use the same reasoning",
"speed is hitting the right of the area in t=1 min_vy = min(target.bottom,",
"velocity values cause the probe to be within the target area after any",
"rectangle defined by top-left and bottom-right positions\"\"\" def __init__(self, left, right, bottom, top):",
"return 1 if _n < 0: return -1 return 0 def hit_target(vx0, vy0,",
"typing import Tuple class Rect: \"\"\"A 2D rectangle defined by top-left and bottom-right",
"max_vx = target.right for velocity_y in range(min_vy, max_vy+1): hit, maxy = hit_target(velocity_x, velocity_y,",
"\"\"\"A 2D rectangle defined by top-left and bottom-right positions\"\"\" def __init__(self, left, right,",
"Shouldn't reach return None def sign(_n): if _n > 0: return 1 if",
"False # Shouldn't reach return None def sign(_n): if _n > 0: return",
"area: x=209..238, y=-86..-59\") example = Rect.from_input(\"target area: x=20..30, y=-10..-5\") def both_parts_bruteforce(target): global_maxima =",
"1 print(f\"What is the highest y position it reaches on this trajectory? {global_maxima}\")",
"= top def inside(self, x, y): \"\"\"Checks if a given x, y point",
"bottom = int(match.group(3)) top = int(match.group(4)) return Rect(left, right, bottom, top) assert False",
"defined by top-left and bottom-right positions\"\"\" def __init__(self, left, right, bottom, top): self.left",
"= re.search(r\"target area: x=(-?\\d*)..(-?\\d*), y=(-?\\d*)..(-?\\d*)\", string) if match: left = int(match.group(1)) right =",
"target.bottom: probe_x += velocity_x probe_y += velocity_y max_height = max(max_height, probe_y) velocity_x -=",
"probe_y): return True, max_height return False, 0 puzzle = Rect.from_input(\"target area: x=209..238, y=-86..-59\")",
"<= y <= self.top) @staticmethod def from_input(string): match = re.search(r\"target area: x=(-?\\d*)..(-?\\d*), y=(-?\\d*)..(-?\\d*)\",",
"reasoning as for maxvy max_vy = -min_vy # not much thinkin here (explore",
"reaches the target area. Returns wether probe reaches the target area in a",
"on this trajectory? {global_maxima}\") print(f\"How many distinct initial velocity values cause the probe",
"top-left and bottom-right positions\"\"\" def __init__(self, left, right, bottom, top): self.left = left",
"+= 1 if target.inside(probe_x, probe_y): return True, max_height return False, 0 puzzle =",
"negative) for velocity_x in range(min_vx, max_vx+1): min_vx = math.floor((1 + math.sqrt(1 + target.left",
"max_height return False, 0 puzzle = Rect.from_input(\"target area: x=209..238, y=-86..-59\") example = Rect.from_input(\"target",
"target.right # max speed is hitting the right of the area in t=1",
"cause the probe to be within the target area after any step?: {hit_count}\")",
"point is inside the rect\"\"\" return (self.left <= x <= self.right) and (self.bottom",
"global_maxima = max(global_maxima, maxy) hit_count += 1 print(f\"What is the highest y position",
"by top-left and bottom-right positions\"\"\" def __init__(self, left, right, bottom, top): self.left =",
"None def sign(_n): if _n > 0: return 1 if _n < 0:",
"0: return -1 return 0 def hit_target(vx0, vy0, target:Rect) -> Tuple[bool, int]: \"\"\"Simulate",
"max(global_maxima, maxy) hit_count += 1 print(f\"What is the highest y position it reaches",
"probe to be within the target area after any step?: {hit_count}\") both_parts_bruteforce(example) both_parts_bruteforce(puzzle)",
"x=(-?\\d*)..(-?\\d*), y=(-?\\d*)..(-?\\d*)\", string) if match: left = int(match.group(1)) right = int(match.group(2)) bottom =",
"x=20..30, y=-10..-5\") def both_parts_bruteforce(target): global_maxima = 0 hit_count = 0 # do a",
"inside the rect\"\"\" return (self.left <= x <= self.right) and (self.bottom <= y",
"right of the area in t=1 min_vy = min(target.bottom, target.top) # use the",
"1 _t += 1 if target.inside(probe_x, probe_y): return True, max_height return False, 0",
"velocity_y in range(min_vy, max_vy+1): hit, maxy = hit_target(velocity_x, velocity_y, target) if hit: global_maxima",
"highest y position it reaches on this trajectory? {global_maxima}\") print(f\"How many distinct initial",
"def sign(_n): if _n > 0: return 1 if _n < 0: return",
"\"\"\"Simulate the probe shooting and check if the probe reaches the target area.",
"print(f\"How many distinct initial velocity values cause the probe to be within the",
"import re import math from typing import Tuple class Rect: \"\"\"A 2D rectangle",
"min(target.bottom, target.top) # use the same reasoning as for maxvy max_vy = -min_vy",
"= 0 max_vx = target.right # max speed is hitting the right of",
"# use the same reasoning as for maxvy max_vy = -min_vy # not",
"global_maxima = 0 hit_count = 0 # do a smart brute-force over sensible",
"def __init__(self, left, right, bottom, top): self.left = left self.right = right self.bottom",
"t=1 min_vy = min(target.bottom, target.top) # use the same reasoning as for maxvy",
"for velocity_y in range(min_vy, max_vy+1): hit, maxy = hit_target(velocity_x, velocity_y, target) if hit:",
"values cause the probe to be within the target area after any step?:",
"target.left * 8)) / 2) max_vx = target.right for velocity_y in range(min_vy, max_vy+1):",
"do a smart brute-force over sensible ranges min_vx = 0 max_vx = target.right"
] |
[
"path('dashboard/<int:pk>/', view_dashboard, name='dashboard-specific'), path('queue/', view_queue, name='queue'), path('privacy/', view_privacy, name='privacy'), path('requests/', view_requests, name='requests'), path('add/request/',",
"view_add_update_request, view_add_to_queue_required, view_delete_from_queue_required, view_delete_request_required, view_delete_response_required, view_add_response_required, ) from django.contrib.auth import views as auth_views",
"path('queue/', view_queue, name='queue'), path('privacy/', view_privacy, name='privacy'), path('requests/', view_requests, name='requests'), path('add/request/', view_add_update_request, name='add-request'), path('update/request/<int:pk>/',",
"path('dashboard/', view_dashboard, name='dashboard'), path('profile/', view_profile, name='profile'), path('dashboard/<int:pk>/', view_dashboard, name='dashboard-specific'), path('queue/', view_queue, name='queue'), path('privacy/',",
"view_add_response_required, ) from django.contrib.auth import views as auth_views app_name = 'application' urlpatterns =",
"name='requests'), path('add/request/', view_add_update_request, name='add-request'), path('update/request/<int:pk>/', view_add_update_request, name='update-request'), path('request/description/<int:pk>/', view_request_description, name='request-description'), path('responses/', view_responses, name='responses'),",
"url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$', view_activate, name='activate'), path('', view_home, name='home'), path('dashboard/', view_dashboard, name='dashboard'), path('profile/', view_profile, name='profile'), path('dashboard/<int:pk>/',",
"as auth_views app_name = 'application' urlpatterns = [ url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$', view_activate, name='activate'), path('', view_home,",
"view_activate, view_profile, view_request_description, view_add_update_request, view_add_to_queue_required, view_delete_from_queue_required, view_delete_request_required, view_delete_response_required, view_add_response_required, ) from django.contrib.auth import",
"name='profile'), path('dashboard/<int:pk>/', view_dashboard, name='dashboard-specific'), path('queue/', view_queue, name='queue'), path('privacy/', view_privacy, name='privacy'), path('requests/', view_requests, name='requests'),",
"view_delete_request_required, name='delete-request-required'), path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required, name='delete-response-required'), path('queue/add/<int:pk>/', view_add_to_queue_required, name='add-to-queue-required'), path('add/response/<int:pk>/', view_add_response_required, name='add-response-required'), path('queue/delete/<int:pk>/', view_delete_from_queue_required,",
"view_dashboard, name='dashboard'), path('profile/', view_profile, name='profile'), path('dashboard/<int:pk>/', view_dashboard, name='dashboard-specific'), path('queue/', view_queue, name='queue'), path('privacy/', view_privacy,",
"from django.urls import path, include from django.conf.urls import url from .views import (",
"path, include from django.conf.urls import url from .views import ( view_dashboard, view_home, view_queue,",
"import views as auth_views app_name = 'application' urlpatterns = [ url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$', view_activate, name='activate'),",
"view_logout, view_signup, view_activate, view_profile, view_request_description, view_add_update_request, view_add_to_queue_required, view_delete_from_queue_required, view_delete_request_required, view_delete_response_required, view_add_response_required, ) from",
"name='activate'), path('', view_home, name='home'), path('dashboard/', view_dashboard, name='dashboard'), path('profile/', view_profile, name='profile'), path('dashboard/<int:pk>/', view_dashboard, name='dashboard-specific'),",
"view_login, name='login'), path('accounts/logout/', view_logout, name='logout'), path('signup/', view_signup, name='signup'), path('update/request/', view_responses, name='update-request'), path('settings/', view_privacy,",
"view_add_update_request, name='add-request'), path('update/request/<int:pk>/', view_add_update_request, name='update-request'), path('request/description/<int:pk>/', view_request_description, name='request-description'), path('responses/', view_responses, name='responses'), path('response/', view_responses,",
"name='logout'), path('signup/', view_signup, name='signup'), path('update/request/', view_responses, name='update-request'), path('settings/', view_privacy, name='settings'), path('delete/request/<int:pk>/', view_delete_request_required, name='delete-request-required'),",
"view_request_description, view_add_update_request, view_add_to_queue_required, view_delete_from_queue_required, view_delete_request_required, view_delete_response_required, view_add_response_required, ) from django.contrib.auth import views as",
"name='add-to-queue-required'), path('add/response/<int:pk>/', view_add_response_required, name='add-response-required'), path('queue/delete/<int:pk>/', view_delete_from_queue_required, name='delete-from-queue-required'), path( 'change-password/', auth_views.PasswordChangeView.as_view( template_name='application/password_change.html', success_url='/' ),",
"( view_dashboard, view_home, view_queue, view_privacy, view_requests, view_responses, view_login, view_logout, view_signup, view_activate, view_profile, view_request_description,",
"import ( view_dashboard, view_home, view_queue, view_privacy, view_requests, view_responses, view_login, view_logout, view_signup, view_activate, view_profile,",
"path('requests/', view_requests, name='requests'), path('add/request/', view_add_update_request, name='add-request'), path('update/request/<int:pk>/', view_add_update_request, name='update-request'), path('request/description/<int:pk>/', view_request_description, name='request-description'), path('responses/',",
".views import ( view_dashboard, view_home, view_queue, view_privacy, view_requests, view_responses, view_login, view_logout, view_signup, view_activate,",
"view_activate, name='activate'), path('', view_home, name='home'), path('dashboard/', view_dashboard, name='dashboard'), path('profile/', view_profile, name='profile'), path('dashboard/<int:pk>/', view_dashboard,",
"path('add/response/<int:pk>/', view_add_response_required, name='add-response-required'), path('queue/delete/<int:pk>/', view_delete_from_queue_required, name='delete-from-queue-required'), path( 'change-password/', auth_views.PasswordChangeView.as_view( template_name='application/password_change.html', success_url='/' ), name='change_password'",
"name='home'), path('dashboard/', view_dashboard, name='dashboard'), path('profile/', view_profile, name='profile'), path('dashboard/<int:pk>/', view_dashboard, name='dashboard-specific'), path('queue/', view_queue, name='queue'),",
"view_delete_from_queue_required, view_delete_request_required, view_delete_response_required, view_add_response_required, ) from django.contrib.auth import views as auth_views app_name =",
"view_add_to_queue_required, view_delete_from_queue_required, view_delete_request_required, view_delete_response_required, view_add_response_required, ) from django.contrib.auth import views as auth_views app_name",
"name='queue'), path('privacy/', view_privacy, name='privacy'), path('requests/', view_requests, name='requests'), path('add/request/', view_add_update_request, name='add-request'), path('update/request/<int:pk>/', view_add_update_request, name='update-request'),",
"view_request_description, name='request-description'), path('responses/', view_responses, name='responses'), path('response/', view_responses, name='response'), path('accounts/login/', view_login, name='login'), path('accounts/logout/', view_logout,",
"name='privacy'), path('requests/', view_requests, name='requests'), path('add/request/', view_add_update_request, name='add-request'), path('update/request/<int:pk>/', view_add_update_request, name='update-request'), path('request/description/<int:pk>/', view_request_description, name='request-description'),",
"view_responses, name='responses'), path('response/', view_responses, name='response'), path('accounts/login/', view_login, name='login'), path('accounts/logout/', view_logout, name='logout'), path('signup/', view_signup,",
"name='settings'), path('delete/request/<int:pk>/', view_delete_request_required, name='delete-request-required'), path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required, name='delete-response-required'), path('queue/add/<int:pk>/', view_add_to_queue_required, name='add-to-queue-required'), path('add/response/<int:pk>/', view_add_response_required, name='add-response-required'),",
"view_privacy, name='privacy'), path('requests/', view_requests, name='requests'), path('add/request/', view_add_update_request, name='add-request'), path('update/request/<int:pk>/', view_add_update_request, name='update-request'), path('request/description/<int:pk>/', view_request_description,",
"view_queue, name='queue'), path('privacy/', view_privacy, name='privacy'), path('requests/', view_requests, name='requests'), path('add/request/', view_add_update_request, name='add-request'), path('update/request/<int:pk>/', view_add_update_request,",
"name='signup'), path('update/request/', view_responses, name='update-request'), path('settings/', view_privacy, name='settings'), path('delete/request/<int:pk>/', view_delete_request_required, name='delete-request-required'), path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required, name='delete-response-required'),",
"path('privacy/', view_privacy, name='privacy'), path('requests/', view_requests, name='requests'), path('add/request/', view_add_update_request, name='add-request'), path('update/request/<int:pk>/', view_add_update_request, name='update-request'), path('request/description/<int:pk>/',",
"path('signup/', view_signup, name='signup'), path('update/request/', view_responses, name='update-request'), path('settings/', view_privacy, name='settings'), path('delete/request/<int:pk>/', view_delete_request_required, name='delete-request-required'), path('delete/response/<int:req_id>/<int:res_id>/',",
"path('queue/add/<int:pk>/', view_add_to_queue_required, name='add-to-queue-required'), path('add/response/<int:pk>/', view_add_response_required, name='add-response-required'), path('queue/delete/<int:pk>/', view_delete_from_queue_required, name='delete-from-queue-required'), path( 'change-password/', auth_views.PasswordChangeView.as_view( template_name='application/password_change.html',",
"view_profile, view_request_description, view_add_update_request, view_add_to_queue_required, view_delete_from_queue_required, view_delete_request_required, view_delete_response_required, view_add_response_required, ) from django.contrib.auth import views",
"view_profile, name='profile'), path('dashboard/<int:pk>/', view_dashboard, name='dashboard-specific'), path('queue/', view_queue, name='queue'), path('privacy/', view_privacy, name='privacy'), path('requests/', view_requests,",
"view_responses, name='response'), path('accounts/login/', view_login, name='login'), path('accounts/logout/', view_logout, name='logout'), path('signup/', view_signup, name='signup'), path('update/request/', view_responses,",
"view_responses, name='update-request'), path('settings/', view_privacy, name='settings'), path('delete/request/<int:pk>/', view_delete_request_required, name='delete-request-required'), path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required, name='delete-response-required'), path('queue/add/<int:pk>/', view_add_to_queue_required,",
"view_dashboard, name='dashboard-specific'), path('queue/', view_queue, name='queue'), path('privacy/', view_privacy, name='privacy'), path('requests/', view_requests, name='requests'), path('add/request/', view_add_update_request,",
"view_privacy, view_requests, view_responses, view_login, view_logout, view_signup, view_activate, view_profile, view_request_description, view_add_update_request, view_add_to_queue_required, view_delete_from_queue_required, view_delete_request_required,",
"path('profile/', view_profile, name='profile'), path('dashboard/<int:pk>/', view_dashboard, name='dashboard-specific'), path('queue/', view_queue, name='queue'), path('privacy/', view_privacy, name='privacy'), path('requests/',",
"django.urls import path, include from django.conf.urls import url from .views import ( view_dashboard,",
"from django.conf.urls import url from .views import ( view_dashboard, view_home, view_queue, view_privacy, view_requests,",
"path('response/', view_responses, name='response'), path('accounts/login/', view_login, name='login'), path('accounts/logout/', view_logout, name='logout'), path('signup/', view_signup, name='signup'), path('update/request/',",
"view_home, name='home'), path('dashboard/', view_dashboard, name='dashboard'), path('profile/', view_profile, name='profile'), path('dashboard/<int:pk>/', view_dashboard, name='dashboard-specific'), path('queue/', view_queue,",
"views as auth_views app_name = 'application' urlpatterns = [ url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$', view_activate, name='activate'), path('',",
"[ url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$', view_activate, name='activate'), path('', view_home, name='home'), path('dashboard/', view_dashboard, name='dashboard'), path('profile/', view_profile, name='profile'),",
"path('settings/', view_privacy, name='settings'), path('delete/request/<int:pk>/', view_delete_request_required, name='delete-request-required'), path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required, name='delete-response-required'), path('queue/add/<int:pk>/', view_add_to_queue_required, name='add-to-queue-required'), path('add/response/<int:pk>/',",
"auth_views app_name = 'application' urlpatterns = [ url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$', view_activate, name='activate'), path('', view_home, name='home'),",
"= [ url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$', view_activate, name='activate'), path('', view_home, name='home'), path('dashboard/', view_dashboard, name='dashboard'), path('profile/', view_profile,",
"view_add_to_queue_required, name='add-to-queue-required'), path('add/response/<int:pk>/', view_add_response_required, name='add-response-required'), path('queue/delete/<int:pk>/', view_delete_from_queue_required, name='delete-from-queue-required'), path( 'change-password/', auth_views.PasswordChangeView.as_view( template_name='application/password_change.html', success_url='/'",
"name='dashboard-specific'), path('queue/', view_queue, name='queue'), path('privacy/', view_privacy, name='privacy'), path('requests/', view_requests, name='requests'), path('add/request/', view_add_update_request, name='add-request'),",
"path('request/description/<int:pk>/', view_request_description, name='request-description'), path('responses/', view_responses, name='responses'), path('response/', view_responses, name='response'), path('accounts/login/', view_login, name='login'), path('accounts/logout/',",
"name='update-request'), path('request/description/<int:pk>/', view_request_description, name='request-description'), path('responses/', view_responses, name='responses'), path('response/', view_responses, name='response'), path('accounts/login/', view_login, name='login'),",
"import path, include from django.conf.urls import url from .views import ( view_dashboard, view_home,",
"name='request-description'), path('responses/', view_responses, name='responses'), path('response/', view_responses, name='response'), path('accounts/login/', view_login, name='login'), path('accounts/logout/', view_logout, name='logout'),",
"path('', view_home, name='home'), path('dashboard/', view_dashboard, name='dashboard'), path('profile/', view_profile, name='profile'), path('dashboard/<int:pk>/', view_dashboard, name='dashboard-specific'), path('queue/',",
"view_delete_request_required, view_delete_response_required, view_add_response_required, ) from django.contrib.auth import views as auth_views app_name = 'application'",
"path('update/request/', view_responses, name='update-request'), path('settings/', view_privacy, name='settings'), path('delete/request/<int:pk>/', view_delete_request_required, name='delete-request-required'), path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required, name='delete-response-required'), path('queue/add/<int:pk>/',",
"urlpatterns = [ url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$', view_activate, name='activate'), path('', view_home, name='home'), path('dashboard/', view_dashboard, name='dashboard'), path('profile/',",
") from django.contrib.auth import views as auth_views app_name = 'application' urlpatterns = [",
"view_add_response_required, name='add-response-required'), path('queue/delete/<int:pk>/', view_delete_from_queue_required, name='delete-from-queue-required'), path( 'change-password/', auth_views.PasswordChangeView.as_view( template_name='application/password_change.html', success_url='/' ), name='change_password' ),",
"path('update/request/<int:pk>/', view_add_update_request, name='update-request'), path('request/description/<int:pk>/', view_request_description, name='request-description'), path('responses/', view_responses, name='responses'), path('response/', view_responses, name='response'), path('accounts/login/',",
"path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required, name='delete-response-required'), path('queue/add/<int:pk>/', view_add_to_queue_required, name='add-to-queue-required'), path('add/response/<int:pk>/', view_add_response_required, name='add-response-required'), path('queue/delete/<int:pk>/', view_delete_from_queue_required, name='delete-from-queue-required'), path(",
"view_delete_response_required, view_add_response_required, ) from django.contrib.auth import views as auth_views app_name = 'application' urlpatterns",
"name='update-request'), path('settings/', view_privacy, name='settings'), path('delete/request/<int:pk>/', view_delete_request_required, name='delete-request-required'), path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required, name='delete-response-required'), path('queue/add/<int:pk>/', view_add_to_queue_required, name='add-to-queue-required'),",
"view_requests, view_responses, view_login, view_logout, view_signup, view_activate, view_profile, view_request_description, view_add_update_request, view_add_to_queue_required, view_delete_from_queue_required, view_delete_request_required, view_delete_response_required,",
"name='add-request'), path('update/request/<int:pk>/', view_add_update_request, name='update-request'), path('request/description/<int:pk>/', view_request_description, name='request-description'), path('responses/', view_responses, name='responses'), path('response/', view_responses, name='response'),",
"<filename>ehelp/application/urls.py from django.urls import path, include from django.conf.urls import url from .views import",
"view_dashboard, view_home, view_queue, view_privacy, view_requests, view_responses, view_login, view_logout, view_signup, view_activate, view_profile, view_request_description, view_add_update_request,",
"view_home, view_queue, view_privacy, view_requests, view_responses, view_login, view_logout, view_signup, view_activate, view_profile, view_request_description, view_add_update_request, view_add_to_queue_required,",
"name='delete-request-required'), path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required, name='delete-response-required'), path('queue/add/<int:pk>/', view_add_to_queue_required, name='add-to-queue-required'), path('add/response/<int:pk>/', view_add_response_required, name='add-response-required'), path('queue/delete/<int:pk>/', view_delete_from_queue_required, name='delete-from-queue-required'),",
"name='dashboard'), path('profile/', view_profile, name='profile'), path('dashboard/<int:pk>/', view_dashboard, name='dashboard-specific'), path('queue/', view_queue, name='queue'), path('privacy/', view_privacy, name='privacy'),",
"path('accounts/login/', view_login, name='login'), path('accounts/logout/', view_logout, name='logout'), path('signup/', view_signup, name='signup'), path('update/request/', view_responses, name='update-request'), path('settings/',",
"view_requests, name='requests'), path('add/request/', view_add_update_request, name='add-request'), path('update/request/<int:pk>/', view_add_update_request, name='update-request'), path('request/description/<int:pk>/', view_request_description, name='request-description'), path('responses/', view_responses,",
"view_privacy, name='settings'), path('delete/request/<int:pk>/', view_delete_request_required, name='delete-request-required'), path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required, name='delete-response-required'), path('queue/add/<int:pk>/', view_add_to_queue_required, name='add-to-queue-required'), path('add/response/<int:pk>/', view_add_response_required,",
"name='delete-response-required'), path('queue/add/<int:pk>/', view_add_to_queue_required, name='add-to-queue-required'), path('add/response/<int:pk>/', view_add_response_required, name='add-response-required'), path('queue/delete/<int:pk>/', view_delete_from_queue_required, name='delete-from-queue-required'), path( 'change-password/', auth_views.PasswordChangeView.as_view(",
"= 'application' urlpatterns = [ url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$', view_activate, name='activate'), path('', view_home, name='home'), path('dashboard/', view_dashboard,",
"from django.contrib.auth import views as auth_views app_name = 'application' urlpatterns = [ url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$',",
"app_name = 'application' urlpatterns = [ url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$', view_activate, name='activate'), path('', view_home, name='home'), path('dashboard/',",
"view_delete_response_required, name='delete-response-required'), path('queue/add/<int:pk>/', view_add_to_queue_required, name='add-to-queue-required'), path('add/response/<int:pk>/', view_add_response_required, name='add-response-required'), path('queue/delete/<int:pk>/', view_delete_from_queue_required, name='delete-from-queue-required'), path( 'change-password/',",
"path('accounts/logout/', view_logout, name='logout'), path('signup/', view_signup, name='signup'), path('update/request/', view_responses, name='update-request'), path('settings/', view_privacy, name='settings'), path('delete/request/<int:pk>/',",
"view_logout, name='logout'), path('signup/', view_signup, name='signup'), path('update/request/', view_responses, name='update-request'), path('settings/', view_privacy, name='settings'), path('delete/request/<int:pk>/', view_delete_request_required,",
"from .views import ( view_dashboard, view_home, view_queue, view_privacy, view_requests, view_responses, view_login, view_logout, view_signup,",
"include from django.conf.urls import url from .views import ( view_dashboard, view_home, view_queue, view_privacy,",
"name='responses'), path('response/', view_responses, name='response'), path('accounts/login/', view_login, name='login'), path('accounts/logout/', view_logout, name='logout'), path('signup/', view_signup, name='signup'),",
"view_login, view_logout, view_signup, view_activate, view_profile, view_request_description, view_add_update_request, view_add_to_queue_required, view_delete_from_queue_required, view_delete_request_required, view_delete_response_required, view_add_response_required, )",
"view_signup, view_activate, view_profile, view_request_description, view_add_update_request, view_add_to_queue_required, view_delete_from_queue_required, view_delete_request_required, view_delete_response_required, view_add_response_required, ) from django.contrib.auth",
"view_add_update_request, name='update-request'), path('request/description/<int:pk>/', view_request_description, name='request-description'), path('responses/', view_responses, name='responses'), path('response/', view_responses, name='response'), path('accounts/login/', view_login,",
"view_queue, view_privacy, view_requests, view_responses, view_login, view_logout, view_signup, view_activate, view_profile, view_request_description, view_add_update_request, view_add_to_queue_required, view_delete_from_queue_required,",
"name='login'), path('accounts/logout/', view_logout, name='logout'), path('signup/', view_signup, name='signup'), path('update/request/', view_responses, name='update-request'), path('settings/', view_privacy, name='settings'),",
"'application' urlpatterns = [ url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$', view_activate, name='activate'), path('', view_home, name='home'), path('dashboard/', view_dashboard, name='dashboard'),",
"view_responses, view_login, view_logout, view_signup, view_activate, view_profile, view_request_description, view_add_update_request, view_add_to_queue_required, view_delete_from_queue_required, view_delete_request_required, view_delete_response_required, view_add_response_required,",
"path('add/request/', view_add_update_request, name='add-request'), path('update/request/<int:pk>/', view_add_update_request, name='update-request'), path('request/description/<int:pk>/', view_request_description, name='request-description'), path('responses/', view_responses, name='responses'), path('response/',",
"django.contrib.auth import views as auth_views app_name = 'application' urlpatterns = [ url(r'^activate/(?P<uidb64>[0-9A-Za-z_\\-]+)/(?P<token>[<KEY>})/$', view_activate,",
"view_signup, name='signup'), path('update/request/', view_responses, name='update-request'), path('settings/', view_privacy, name='settings'), path('delete/request/<int:pk>/', view_delete_request_required, name='delete-request-required'), path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required,",
"import url from .views import ( view_dashboard, view_home, view_queue, view_privacy, view_requests, view_responses, view_login,",
"name='response'), path('accounts/login/', view_login, name='login'), path('accounts/logout/', view_logout, name='logout'), path('signup/', view_signup, name='signup'), path('update/request/', view_responses, name='update-request'),",
"url from .views import ( view_dashboard, view_home, view_queue, view_privacy, view_requests, view_responses, view_login, view_logout,",
"path('responses/', view_responses, name='responses'), path('response/', view_responses, name='response'), path('accounts/login/', view_login, name='login'), path('accounts/logout/', view_logout, name='logout'), path('signup/',",
"path('delete/request/<int:pk>/', view_delete_request_required, name='delete-request-required'), path('delete/response/<int:req_id>/<int:res_id>/', view_delete_response_required, name='delete-response-required'), path('queue/add/<int:pk>/', view_add_to_queue_required, name='add-to-queue-required'), path('add/response/<int:pk>/', view_add_response_required, name='add-response-required'), path('queue/delete/<int:pk>/',",
"name='add-response-required'), path('queue/delete/<int:pk>/', view_delete_from_queue_required, name='delete-from-queue-required'), path( 'change-password/', auth_views.PasswordChangeView.as_view( template_name='application/password_change.html', success_url='/' ), name='change_password' ), ]",
"django.conf.urls import url from .views import ( view_dashboard, view_home, view_queue, view_privacy, view_requests, view_responses,"
] |
[
"roots..') start = time() roots = Role.objects \\ .all() \\ .values_list('id', flat=True) stop",
"batch_role_ancestor_rebuilding(): for model in models: for obj in model.objects.iterator(): obj.save() def rebuild_role_hierarchy(apps, schema_editor):",
"seconds, rebuilding ancestry map' % (len(roots), stop - start)) start = time() Role.rebuild_role_ancestor_list(roots,",
"with batch_role_ancestor_rebuilding(): for model in models: for obj in model.objects.iterator(): obj.save() def rebuild_role_hierarchy(apps,",
"start = time() Role.rebuild_role_ancestor_list(roots, []) stop = time() logger.info('Rebuild completed in %f seconds'",
"that happens for every object in the system before we get busy with",
"time() Role.rebuild_role_ancestor_list(roots, []) stop = time() logger.info('Rebuild completed in %f seconds' % (stop",
"of our resource types and call .save() to ensure all that happens for",
"for users a little differently. ''' models = [ apps.get_model('main', m) for m",
"stop = time() logger.info('Found %d roots in %f seconds, rebuilding ancestry map' %",
"[]) stop = time() logger.info('Rebuild completed in %f seconds' % (stop - start))",
"in %f seconds' % (stop - start)) logger.info('Done.') def delete_all_user_roles(apps, schema_editor): ContentType =",
"in the system before we get busy with the actual migration work. This",
"logging.getLogger('rbac_migrations') def create_roles(apps, schema_editor): ''' Implicit role creation happens in our post_save hook",
"for model in models: for obj in model.objects.iterator(): obj.save() def rebuild_role_hierarchy(apps, schema_editor): logger.info('Computing",
"differently. ''' models = [ apps.get_model('main', m) for m in [ 'Organization', 'Team',",
"% (stop - start)) logger.info('Done.') def delete_all_user_roles(apps, schema_editor): ContentType = apps.get_model('contenttypes', \"ContentType\") Role",
"batch_role_ancestor_rebuilding logger = logging.getLogger('rbac_migrations') def create_roles(apps, schema_editor): ''' Implicit role creation happens in",
"= apps.get_model('contenttypes', \"ContentType\") Role = apps.get_model('main', \"Role\") User = apps.get_model('auth', \"User\") user_content_type =",
"delete_all_user_roles(apps, schema_editor): ContentType = apps.get_model('contenttypes', \"ContentType\") Role = apps.get_model('main', \"Role\") User = apps.get_model('auth',",
"creation for users a little differently. ''' models = [ apps.get_model('main', m) for",
"models: for obj in model.objects.iterator(): obj.save() def rebuild_role_hierarchy(apps, schema_editor): logger.info('Computing role roots..') start",
"types and call .save() to ensure all that happens for every object in",
"gets run after migrate_users, which does role creation for users a little differently.",
"little differently. ''' models = [ apps.get_model('main', m) for m in [ 'Organization',",
"'Organization', 'Team', 'Inventory', 'Project', 'Credential', 'CustomInventoryScript', 'JobTemplate', ] ] with batch_role_ancestor_rebuilding(): for model",
"'CustomInventoryScript', 'JobTemplate', ] ] with batch_role_ancestor_rebuilding(): for model in models: for obj in",
"get busy with the actual migration work. This gets run after migrate_users, which",
"busy with the actual migration work. This gets run after migrate_users, which does",
"in %f seconds, rebuilding ancestry map' % (len(roots), stop - start)) start =",
"= logging.getLogger('rbac_migrations') def create_roles(apps, schema_editor): ''' Implicit role creation happens in our post_save",
"a little differently. ''' models = [ apps.get_model('main', m) for m in [",
"post_save hook for all of our resources. Here we iterate through all of",
"apps.get_model('main', \"Role\") User = apps.get_model('auth', \"User\") user_content_type = ContentType.objects.get_for_model(User) for role in Role.objects.filter(content_type=user_content_type).iterator():",
"after migrate_users, which does role creation for users a little differently. ''' models",
"in model.objects.iterator(): obj.save() def rebuild_role_hierarchy(apps, schema_editor): logger.info('Computing role roots..') start = time() roots",
"role roots..') start = time() roots = Role.objects \\ .all() \\ .values_list('id', flat=True)",
"''' models = [ apps.get_model('main', m) for m in [ 'Organization', 'Team', 'Inventory',",
"time from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding logger = logging.getLogger('rbac_migrations') def create_roles(apps, schema_editor): '''",
"= time() Role.rebuild_role_ancestor_list(roots, []) stop = time() logger.info('Rebuild completed in %f seconds' %",
"all of our resource types and call .save() to ensure all that happens",
"start)) logger.info('Done.') def delete_all_user_roles(apps, schema_editor): ContentType = apps.get_model('contenttypes', \"ContentType\") Role = apps.get_model('main', \"Role\")",
"users a little differently. ''' models = [ apps.get_model('main', m) for m in",
"time() logger.info('Rebuild completed in %f seconds' % (stop - start)) logger.info('Done.') def delete_all_user_roles(apps,",
"logger = logging.getLogger('rbac_migrations') def create_roles(apps, schema_editor): ''' Implicit role creation happens in our",
"\"Role\") User = apps.get_model('auth', \"User\") user_content_type = ContentType.objects.get_for_model(User) for role in Role.objects.filter(content_type=user_content_type).iterator(): role.delete()",
"import time from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding logger = logging.getLogger('rbac_migrations') def create_roles(apps, schema_editor):",
"to ensure all that happens for every object in the system before we",
"[ 'Organization', 'Team', 'Inventory', 'Project', 'Credential', 'CustomInventoryScript', 'JobTemplate', ] ] with batch_role_ancestor_rebuilding(): for",
"= time() roots = Role.objects \\ .all() \\ .values_list('id', flat=True) stop = time()",
"def create_roles(apps, schema_editor): ''' Implicit role creation happens in our post_save hook for",
"iterate through all of our resource types and call .save() to ensure all",
"%f seconds, rebuilding ancestry map' % (len(roots), stop - start)) start = time()",
"logging from time import time from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding logger = logging.getLogger('rbac_migrations')",
"creation happens in our post_save hook for all of our resources. Here we",
"run after migrate_users, which does role creation for users a little differently. '''",
"start = time() roots = Role.objects \\ .all() \\ .values_list('id', flat=True) stop =",
"roots = Role.objects \\ .all() \\ .values_list('id', flat=True) stop = time() logger.info('Found %d",
"and call .save() to ensure all that happens for every object in the",
"[ apps.get_model('main', m) for m in [ 'Organization', 'Team', 'Inventory', 'Project', 'Credential', 'CustomInventoryScript',",
"= Role.objects \\ .all() \\ .values_list('id', flat=True) stop = time() logger.info('Found %d roots",
"roots in %f seconds, rebuilding ancestry map' % (len(roots), stop - start)) start",
"logger.info('Done.') def delete_all_user_roles(apps, schema_editor): ContentType = apps.get_model('contenttypes', \"ContentType\") Role = apps.get_model('main', \"Role\") User",
"from time import time from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding logger = logging.getLogger('rbac_migrations') def",
"stop - start)) start = time() Role.rebuild_role_ancestor_list(roots, []) stop = time() logger.info('Rebuild completed",
"= time() logger.info('Rebuild completed in %f seconds' % (stop - start)) logger.info('Done.') def",
"actual migration work. This gets run after migrate_users, which does role creation for",
"completed in %f seconds' % (stop - start)) logger.info('Done.') def delete_all_user_roles(apps, schema_editor): ContentType",
"Role = apps.get_model('main', \"Role\") User = apps.get_model('auth', \"User\") user_content_type = ContentType.objects.get_for_model(User) for role",
"schema_editor): ContentType = apps.get_model('contenttypes', \"ContentType\") Role = apps.get_model('main', \"Role\") User = apps.get_model('auth', \"User\")",
"in [ 'Organization', 'Team', 'Inventory', 'Project', 'Credential', 'CustomInventoryScript', 'JobTemplate', ] ] with batch_role_ancestor_rebuilding():",
"m in [ 'Organization', 'Team', 'Inventory', 'Project', 'Credential', 'CustomInventoryScript', 'JobTemplate', ] ] with",
"time() roots = Role.objects \\ .all() \\ .values_list('id', flat=True) stop = time() logger.info('Found",
"= apps.get_model('main', \"Role\") User = apps.get_model('auth', \"User\") user_content_type = ContentType.objects.get_for_model(User) for role in",
"obj in model.objects.iterator(): obj.save() def rebuild_role_hierarchy(apps, schema_editor): logger.info('Computing role roots..') start = time()",
"resources. Here we iterate through all of our resource types and call .save()",
"our resource types and call .save() to ensure all that happens for every",
"% (len(roots), stop - start)) start = time() Role.rebuild_role_ancestor_list(roots, []) stop = time()",
"'Team', 'Inventory', 'Project', 'Credential', 'CustomInventoryScript', 'JobTemplate', ] ] with batch_role_ancestor_rebuilding(): for model in",
"schema_editor): logger.info('Computing role roots..') start = time() roots = Role.objects \\ .all() \\",
"role creation happens in our post_save hook for all of our resources. Here",
"with the actual migration work. This gets run after migrate_users, which does role",
"role creation for users a little differently. ''' models = [ apps.get_model('main', m)",
"ensure all that happens for every object in the system before we get",
"rebuilding ancestry map' % (len(roots), stop - start)) start = time() Role.rebuild_role_ancestor_list(roots, [])",
"from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding logger = logging.getLogger('rbac_migrations') def create_roles(apps, schema_editor): ''' Implicit",
"ContentType = apps.get_model('contenttypes', \"ContentType\") Role = apps.get_model('main', \"Role\") User = apps.get_model('auth', \"User\") user_content_type",
"Role.rebuild_role_ancestor_list(roots, []) stop = time() logger.info('Rebuild completed in %f seconds' % (stop -",
"''' Implicit role creation happens in our post_save hook for all of our",
"= [ apps.get_model('main', m) for m in [ 'Organization', 'Team', 'Inventory', 'Project', 'Credential',",
"hook for all of our resources. Here we iterate through all of our",
"] with batch_role_ancestor_rebuilding(): for model in models: for obj in model.objects.iterator(): obj.save() def",
"def rebuild_role_hierarchy(apps, schema_editor): logger.info('Computing role roots..') start = time() roots = Role.objects \\",
"rebuild_role_hierarchy(apps, schema_editor): logger.info('Computing role roots..') start = time() roots = Role.objects \\ .all()",
"\\ .values_list('id', flat=True) stop = time() logger.info('Found %d roots in %f seconds, rebuilding",
"] ] with batch_role_ancestor_rebuilding(): for model in models: for obj in model.objects.iterator(): obj.save()",
"map' % (len(roots), stop - start)) start = time() Role.rebuild_role_ancestor_list(roots, []) stop =",
"- start)) logger.info('Done.') def delete_all_user_roles(apps, schema_editor): ContentType = apps.get_model('contenttypes', \"ContentType\") Role = apps.get_model('main',",
"This gets run after migrate_users, which does role creation for users a little",
"create_roles(apps, schema_editor): ''' Implicit role creation happens in our post_save hook for all",
"of our resources. Here we iterate through all of our resource types and",
"'JobTemplate', ] ] with batch_role_ancestor_rebuilding(): for model in models: for obj in model.objects.iterator():",
"Role.objects \\ .all() \\ .values_list('id', flat=True) stop = time() logger.info('Found %d roots in",
"obj.save() def rebuild_role_hierarchy(apps, schema_editor): logger.info('Computing role roots..') start = time() roots = Role.objects",
"'Project', 'Credential', 'CustomInventoryScript', 'JobTemplate', ] ] with batch_role_ancestor_rebuilding(): for model in models: for",
"in models: for obj in model.objects.iterator(): obj.save() def rebuild_role_hierarchy(apps, schema_editor): logger.info('Computing role roots..')",
"all that happens for every object in the system before we get busy",
"for obj in model.objects.iterator(): obj.save() def rebuild_role_hierarchy(apps, schema_editor): logger.info('Computing role roots..') start =",
"we iterate through all of our resource types and call .save() to ensure",
"seconds' % (stop - start)) logger.info('Done.') def delete_all_user_roles(apps, schema_editor): ContentType = apps.get_model('contenttypes', \"ContentType\")",
"def delete_all_user_roles(apps, schema_editor): ContentType = apps.get_model('contenttypes', \"ContentType\") Role = apps.get_model('main', \"Role\") User =",
".values_list('id', flat=True) stop = time() logger.info('Found %d roots in %f seconds, rebuilding ancestry",
"for every object in the system before we get busy with the actual",
"our resources. Here we iterate through all of our resource types and call",
"system before we get busy with the actual migration work. This gets run",
"work. This gets run after migrate_users, which does role creation for users a",
"migration work. This gets run after migrate_users, which does role creation for users",
"time import time from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding logger = logging.getLogger('rbac_migrations') def create_roles(apps,",
"%d roots in %f seconds, rebuilding ancestry map' % (len(roots), stop - start))",
"= time() logger.info('Found %d roots in %f seconds, rebuilding ancestry map' % (len(roots),",
"model in models: for obj in model.objects.iterator(): obj.save() def rebuild_role_hierarchy(apps, schema_editor): logger.info('Computing role",
"we get busy with the actual migration work. This gets run after migrate_users,",
"\\ .all() \\ .values_list('id', flat=True) stop = time() logger.info('Found %d roots in %f",
"call .save() to ensure all that happens for every object in the system",
"start)) start = time() Role.rebuild_role_ancestor_list(roots, []) stop = time() logger.info('Rebuild completed in %f",
"import logging from time import time from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding logger =",
"happens for every object in the system before we get busy with the",
"logger.info('Rebuild completed in %f seconds' % (stop - start)) logger.info('Done.') def delete_all_user_roles(apps, schema_editor):",
"apps.get_model('main', m) for m in [ 'Organization', 'Team', 'Inventory', 'Project', 'Credential', 'CustomInventoryScript', 'JobTemplate',",
"(stop - start)) logger.info('Done.') def delete_all_user_roles(apps, schema_editor): ContentType = apps.get_model('contenttypes', \"ContentType\") Role =",
"schema_editor): ''' Implicit role creation happens in our post_save hook for all of",
"<reponame>Avinesh/awx import logging from time import time from awx.main.models.rbac import Role, batch_role_ancestor_rebuilding logger",
"migrate_users, which does role creation for users a little differently. ''' models =",
"'Inventory', 'Project', 'Credential', 'CustomInventoryScript', 'JobTemplate', ] ] with batch_role_ancestor_rebuilding(): for model in models:",
"for all of our resources. Here we iterate through all of our resource",
"Implicit role creation happens in our post_save hook for all of our resources.",
"\"ContentType\") Role = apps.get_model('main', \"Role\") User = apps.get_model('auth', \"User\") user_content_type = ContentType.objects.get_for_model(User) for",
"time() logger.info('Found %d roots in %f seconds, rebuilding ancestry map' % (len(roots), stop",
"resource types and call .save() to ensure all that happens for every object",
"flat=True) stop = time() logger.info('Found %d roots in %f seconds, rebuilding ancestry map'",
"in our post_save hook for all of our resources. Here we iterate through",
"does role creation for users a little differently. ''' models = [ apps.get_model('main',",
"logger.info('Computing role roots..') start = time() roots = Role.objects \\ .all() \\ .values_list('id',",
"'Credential', 'CustomInventoryScript', 'JobTemplate', ] ] with batch_role_ancestor_rebuilding(): for model in models: for obj",
"before we get busy with the actual migration work. This gets run after",
"- start)) start = time() Role.rebuild_role_ancestor_list(roots, []) stop = time() logger.info('Rebuild completed in",
"stop = time() logger.info('Rebuild completed in %f seconds' % (stop - start)) logger.info('Done.')",
"the system before we get busy with the actual migration work. This gets",
"which does role creation for users a little differently. ''' models = [",
"for m in [ 'Organization', 'Team', 'Inventory', 'Project', 'Credential', 'CustomInventoryScript', 'JobTemplate', ] ]",
"through all of our resource types and call .save() to ensure all that",
"logger.info('Found %d roots in %f seconds, rebuilding ancestry map' % (len(roots), stop -",
"happens in our post_save hook for all of our resources. Here we iterate",
"Here we iterate through all of our resource types and call .save() to",
"model.objects.iterator(): obj.save() def rebuild_role_hierarchy(apps, schema_editor): logger.info('Computing role roots..') start = time() roots =",
"object in the system before we get busy with the actual migration work.",
"Role, batch_role_ancestor_rebuilding logger = logging.getLogger('rbac_migrations') def create_roles(apps, schema_editor): ''' Implicit role creation happens",
"import Role, batch_role_ancestor_rebuilding logger = logging.getLogger('rbac_migrations') def create_roles(apps, schema_editor): ''' Implicit role creation",
"models = [ apps.get_model('main', m) for m in [ 'Organization', 'Team', 'Inventory', 'Project',",
"every object in the system before we get busy with the actual migration",
"apps.get_model('contenttypes', \"ContentType\") Role = apps.get_model('main', \"Role\") User = apps.get_model('auth', \"User\") user_content_type = ContentType.objects.get_for_model(User)",
"awx.main.models.rbac import Role, batch_role_ancestor_rebuilding logger = logging.getLogger('rbac_migrations') def create_roles(apps, schema_editor): ''' Implicit role",
"our post_save hook for all of our resources. Here we iterate through all",
"the actual migration work. This gets run after migrate_users, which does role creation",
"m) for m in [ 'Organization', 'Team', 'Inventory', 'Project', 'Credential', 'CustomInventoryScript', 'JobTemplate', ]",
".all() \\ .values_list('id', flat=True) stop = time() logger.info('Found %d roots in %f seconds,",
"all of our resources. Here we iterate through all of our resource types",
"ancestry map' % (len(roots), stop - start)) start = time() Role.rebuild_role_ancestor_list(roots, []) stop",
"(len(roots), stop - start)) start = time() Role.rebuild_role_ancestor_list(roots, []) stop = time() logger.info('Rebuild",
".save() to ensure all that happens for every object in the system before",
"%f seconds' % (stop - start)) logger.info('Done.') def delete_all_user_roles(apps, schema_editor): ContentType = apps.get_model('contenttypes',"
] |
[
"Image, ImageEnhance import PILasOPENCV as Image import PILasOPENCV as ImageEnhance img = Image.open('lena.jpg')",
"PILasOPENCV as Image import PILasOPENCV as ImageEnhance img = Image.open('lena.jpg') # enhancer =",
"Image import PILasOPENCV as ImageEnhance img = Image.open('lena.jpg') # enhancer = ImageEnhance.Contrast(img) enhancer.enhance(0.0).save(",
"= Image.open('lena.jpg') # enhancer = ImageEnhance.Contrast(img) enhancer.enhance(0.0).save( \"ImageEnhance_Contrast_000.jpg\") enhancer.enhance(0.25).save( \"ImageEnhance_Contrast_025.jpg\") enhancer.enhance(0.5).save( \"ImageEnhance_Contrast_050.jpg\") enhancer.enhance(0.75).save(",
"PIL import Image, ImageEnhance import PILasOPENCV as Image import PILasOPENCV as ImageEnhance img",
"Image.open('lena.jpg') # enhancer = ImageEnhance.Contrast(img) enhancer.enhance(0.0).save( \"ImageEnhance_Contrast_000.jpg\") enhancer.enhance(0.25).save( \"ImageEnhance_Contrast_025.jpg\") enhancer.enhance(0.5).save( \"ImageEnhance_Contrast_050.jpg\") enhancer.enhance(0.75).save( \"ImageEnhance_Contrast_075.jpg\")",
"import PILasOPENCV as Image import PILasOPENCV as ImageEnhance img = Image.open('lena.jpg') # enhancer",
"import Image, ImageEnhance import PILasOPENCV as Image import PILasOPENCV as ImageEnhance img =",
"as ImageEnhance img = Image.open('lena.jpg') # enhancer = ImageEnhance.Contrast(img) enhancer.enhance(0.0).save( \"ImageEnhance_Contrast_000.jpg\") enhancer.enhance(0.25).save( \"ImageEnhance_Contrast_025.jpg\")",
"ImageEnhance import PILasOPENCV as Image import PILasOPENCV as ImageEnhance img = Image.open('lena.jpg') #",
"ImageEnhance img = Image.open('lena.jpg') # enhancer = ImageEnhance.Contrast(img) enhancer.enhance(0.0).save( \"ImageEnhance_Contrast_000.jpg\") enhancer.enhance(0.25).save( \"ImageEnhance_Contrast_025.jpg\") enhancer.enhance(0.5).save(",
"PILasOPENCV as ImageEnhance img = Image.open('lena.jpg') # enhancer = ImageEnhance.Contrast(img) enhancer.enhance(0.0).save( \"ImageEnhance_Contrast_000.jpg\") enhancer.enhance(0.25).save(",
"# enhancer = ImageEnhance.Contrast(img) enhancer.enhance(0.0).save( \"ImageEnhance_Contrast_000.jpg\") enhancer.enhance(0.25).save( \"ImageEnhance_Contrast_025.jpg\") enhancer.enhance(0.5).save( \"ImageEnhance_Contrast_050.jpg\") enhancer.enhance(0.75).save( \"ImageEnhance_Contrast_075.jpg\") enhancer.enhance(1.0).save(",
"# from PIL import Image, ImageEnhance import PILasOPENCV as Image import PILasOPENCV as",
"enhancer = ImageEnhance.Contrast(img) enhancer.enhance(0.0).save( \"ImageEnhance_Contrast_000.jpg\") enhancer.enhance(0.25).save( \"ImageEnhance_Contrast_025.jpg\") enhancer.enhance(0.5).save( \"ImageEnhance_Contrast_050.jpg\") enhancer.enhance(0.75).save( \"ImageEnhance_Contrast_075.jpg\") enhancer.enhance(1.0).save( \"ImageEnhance_Contrast_100.jpg\")",
"<filename>tests/test_contrast.py<gh_stars>10-100 # from PIL import Image, ImageEnhance import PILasOPENCV as Image import PILasOPENCV",
"from PIL import Image, ImageEnhance import PILasOPENCV as Image import PILasOPENCV as ImageEnhance",
"import PILasOPENCV as ImageEnhance img = Image.open('lena.jpg') # enhancer = ImageEnhance.Contrast(img) enhancer.enhance(0.0).save( \"ImageEnhance_Contrast_000.jpg\")",
"img = Image.open('lena.jpg') # enhancer = ImageEnhance.Contrast(img) enhancer.enhance(0.0).save( \"ImageEnhance_Contrast_000.jpg\") enhancer.enhance(0.25).save( \"ImageEnhance_Contrast_025.jpg\") enhancer.enhance(0.5).save( \"ImageEnhance_Contrast_050.jpg\")",
"as Image import PILasOPENCV as ImageEnhance img = Image.open('lena.jpg') # enhancer = ImageEnhance.Contrast(img)"
] |
[
"Extra Apps: 'raven.contrib.django.raven_compat', ] IMPORT_EXPORT_USE_TRANSACTIONS = True MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware',",
"config('SECRET_KEY', default=os.environ.get('SECRET_KEY'), cast=str) LOGIN_URL = '/login/' LOGOUT_URL = '/logout/' LOGIN_REDIRECT_URL = '/' PASSWORD_HASHERS",
"= [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',",
"} # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', },",
"= '/' PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.MD5PasswordHasher', ) # SECURITY WARNING: don't run with",
"= '/login/' LOGOUT_URL = '/logout/' LOGIN_REDIRECT_URL = '/' PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.MD5PasswordHasher', )",
"PROJECT_DIR = BASE_DIR.parent # Quick-start development settings - unsuitable for production # See",
"} } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',",
"'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', #Extra context processors: #'django.core.context_processors.i18n', ],",
"# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', },",
"}, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', },",
"MEDIA_ROOT = BASE_DIR.parent.child('media') STATICFILES_DIRS = ( BASE_DIR.parent.child('node_modules'), STATIC_ROOT.child('custom'), ) # BASE_DIR.parent.child('static'), STATICFILES_FINDERS =",
"[ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'app.urls' TEMPLATES",
"with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = DEBUG ALLOWED_HOSTS",
"import config BASE_DIR = Path(__file__).parent PROJECT_DIR = BASE_DIR.parent # Quick-start development settings -",
"INTERNAL_IPS = ('127.0.0.1','192.168.0.1','localhost') # Email settings EMAIL_HOST = config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'), cast=str) EMAIL_PORT =",
"turned on in production! DEBUG = True TEMPLATE_DEBUG = DEBUG ALLOWED_HOSTS = ('*',)",
"EMAIL_HOST = config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'), cast=str) EMAIL_PORT = config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'), cast=int) EMAIL_HOST_USER = config('EMAIL_HOST_USER',",
"default=os.environ.get('EMAIL_HOST'), cast=str) EMAIL_PORT = config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'), cast=int) EMAIL_HOST_USER = config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'), cast=str) EMAIL_HOST_PASSWORD",
"# SECURITY WARNING: don't run with debug turned on in production! DEBUG =",
"keep the secret key used in production secret! SECRET_KEY = config('SECRET_KEY', default=os.environ.get('SECRET_KEY'), cast=str)",
"cast=bool) DEFAULT_FROM_EMAIL = 'StudentMy Team <<EMAIL>>' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # During development only",
"LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Fortaleza' USE_I18N = True USE_L10N = True USE_TZ",
"= config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'), cast=int) EMAIL_HOST_USER = config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'), cast=str) EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'),",
"# https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = BASE_DIR.parent.child('static') # Application definition INSTALLED_APPS =",
"BASE_DIR.child('locale'), ) MEDIA_URL = '/media/' MEDIA_ROOT = BASE_DIR.parent.child('media') STATICFILES_DIRS = ( BASE_DIR.parent.child('node_modules'), STATIC_ROOT.child('custom'),",
"= BASE_DIR.parent.child('static') # Application definition INSTALLED_APPS = [ 'rest_framework', 'django.contrib.auth', 'core', 'django.contrib.admin', 'django.contrib.contenttypes',",
"] IMPORT_EXPORT_USE_TRANSACTIONS = True MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware',",
"= 'StudentMy Team <<EMAIL>>' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # During development only # Static",
"# Raven Settings: RAVEN_CONFIG = { 'dsn': config('RAVEN_DSN', default=os.environ.get('RAVEN_DSN'), cast=str), #'release': raven.fetch_git_sha(BASE_DIR.child('.git').child('HEAD')), }",
"= True USE_L10N = True USE_TZ = True DEFAULT_CHARSET = 'utf-8' LOCALE_PATHS =",
"'/static/' STATIC_ROOT = BASE_DIR.parent.child('static') # Application definition INSTALLED_APPS = [ 'rest_framework', 'django.contrib.auth', 'core',",
"'core', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Extra Apps: 'raven.contrib.django.raven_compat', ] IMPORT_EXPORT_USE_TRANSACTIONS =",
"'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'app.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates',",
"production! DEBUG = True TEMPLATE_DEBUG = DEBUG ALLOWED_HOSTS = ('*',) INTERNAL_IPS = ('127.0.0.1','192.168.0.1','localhost')",
"= ( BASE_DIR.parent.child('node_modules'), STATIC_ROOT.child('custom'), ) # BASE_DIR.parent.child('static'), STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', )",
"'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Extra Apps: 'raven.contrib.django.raven_compat', ] IMPORT_EXPORT_USE_TRANSACTIONS = True",
"= 'utf-8' LOCALE_PATHS = ( BASE_DIR.child('locale'), ) MEDIA_URL = '/media/' MEDIA_ROOT = BASE_DIR.parent.child('media')",
"], }, }, ] WSGI_APPLICATION = 'app.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES =",
"= 'app.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3',",
"cast=int) EMAIL_HOST_USER = config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'), cast=str) EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str) EMAIL_USE_TLS =",
"'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', #Extra context processors: #'django.core.context_processors.i18n', ], }, },",
"key used in production secret! SECRET_KEY = config('SECRET_KEY', default=os.environ.get('SECRET_KEY'), cast=str) LOGIN_URL = '/login/'",
"config BASE_DIR = Path(__file__).parent PROJECT_DIR = BASE_DIR.parent # Quick-start development settings - unsuitable",
"= Path(__file__).parent PROJECT_DIR = BASE_DIR.parent # Quick-start development settings - unsuitable for production",
"( 'django.contrib.auth.hashers.MD5PasswordHasher', ) # SECURITY WARNING: don't run with debug turned on in",
"https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = BASE_DIR.parent.child('static') # Application definition INSTALLED_APPS = [",
"[ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },",
"DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } #",
"# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production",
"('127.0.0.1','192.168.0.1','localhost') # Email settings EMAIL_HOST = config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'), cast=str) EMAIL_PORT = config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'),",
"<reponame>danieldsf/ads-activities import os, raven, logging from unipath import Path from decouple import config",
"default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str) EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'), cast=bool) DEFAULT_FROM_EMAIL = 'StudentMy Team <<EMAIL>>' EMAIL_BACKEND",
"https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, {",
"'America/Fortaleza' USE_I18N = True USE_L10N = True USE_TZ = True DEFAULT_CHARSET = 'utf-8'",
"USE_L10N = True USE_TZ = True DEFAULT_CHARSET = 'utf-8' LOCALE_PATHS = ( BASE_DIR.child('locale'),",
"( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # Raven Settings: RAVEN_CONFIG = { 'dsn': config('RAVEN_DSN', default=os.environ.get('RAVEN_DSN'),",
"True DEFAULT_CHARSET = 'utf-8' LOCALE_PATHS = ( BASE_DIR.child('locale'), ) MEDIA_URL = '/media/' MEDIA_ROOT",
"= True USE_TZ = True DEFAULT_CHARSET = 'utf-8' LOCALE_PATHS = ( BASE_DIR.child('locale'), )",
"JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = BASE_DIR.parent.child('static') # Application definition",
"{ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, {",
"secret! SECRET_KEY = config('SECRET_KEY', default=os.environ.get('SECRET_KEY'), cast=str) LOGIN_URL = '/login/' LOGOUT_URL = '/logout/' LOGIN_REDIRECT_URL",
"'en-us' TIME_ZONE = 'America/Fortaleza' USE_I18N = True USE_L10N = True USE_TZ = True",
"'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', #Extra context processors: #'django.core.context_processors.i18n', ], }, }, ] WSGI_APPLICATION =",
"WSGI_APPLICATION = 'app.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE':",
"definition INSTALLED_APPS = [ 'rest_framework', 'django.contrib.auth', 'core', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', #",
"'raven.contrib.django.raven_compat', ] IMPORT_EXPORT_USE_TRANSACTIONS = True MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware',",
"] ROOT_URLCONF = 'app.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS':",
"'django.contrib.auth', 'core', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Extra Apps: 'raven.contrib.django.raven_compat', ] IMPORT_EXPORT_USE_TRANSACTIONS",
"'/media/' MEDIA_ROOT = BASE_DIR.parent.child('media') STATICFILES_DIRS = ( BASE_DIR.parent.child('node_modules'), STATIC_ROOT.child('custom'), ) # BASE_DIR.parent.child('static'), STATICFILES_FINDERS",
"ROOT_URLCONF = 'app.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True,",
"'django.contrib.messages', 'django.contrib.staticfiles', # Extra Apps: 'raven.contrib.django.raven_compat', ] IMPORT_EXPORT_USE_TRANSACTIONS = True MIDDLEWARE = [",
"Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING:",
"'/login/' LOGOUT_URL = '/logout/' LOGIN_REDIRECT_URL = '/' PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.MD5PasswordHasher', ) #",
"default=os.environ.get('EMAIL_USE_TLS'), cast=bool) DEFAULT_FROM_EMAIL = 'StudentMy Team <<EMAIL>>' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # During development",
"True USE_TZ = True DEFAULT_CHARSET = 'utf-8' LOCALE_PATHS = ( BASE_DIR.child('locale'), ) MEDIA_URL",
"'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] #",
"BASE_DIR.parent.child('static'), STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # Raven Settings: RAVEN_CONFIG = {",
"cast=str) EMAIL_PORT = config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'), cast=int) EMAIL_HOST_USER = config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'), cast=str) EMAIL_HOST_PASSWORD =",
"production secret! SECRET_KEY = config('SECRET_KEY', default=os.environ.get('SECRET_KEY'), cast=str) LOGIN_URL = '/login/' LOGOUT_URL = '/logout/'",
"- unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret",
"= 'America/Fortaleza' USE_I18N = True USE_L10N = True USE_TZ = True DEFAULT_CHARSET =",
") # Raven Settings: RAVEN_CONFIG = { 'dsn': config('RAVEN_DSN', default=os.environ.get('RAVEN_DSN'), cast=str), #'release': raven.fetch_git_sha(BASE_DIR.child('.git').child('HEAD')),",
"'django.contrib.auth.hashers.MD5PasswordHasher', ) # SECURITY WARNING: don't run with debug turned on in production!",
"= BASE_DIR.parent.child('media') STATICFILES_DIRS = ( BASE_DIR.parent.child('node_modules'), STATIC_ROOT.child('custom'), ) # BASE_DIR.parent.child('static'), STATICFILES_FINDERS = (",
"'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'app.urls' TEMPLATES =",
"# Email settings EMAIL_HOST = config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'), cast=str) EMAIL_PORT = config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'), cast=int)",
"cast=str) LOGIN_URL = '/login/' LOGOUT_URL = '/logout/' LOGIN_REDIRECT_URL = '/' PASSWORD_HASHERS = (",
"= '/static/' STATIC_ROOT = BASE_DIR.parent.child('static') # Application definition INSTALLED_APPS = [ 'rest_framework', 'django.contrib.auth',",
"config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'), cast=str) EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str) EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'), cast=bool)",
"[ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', #Extra context processors: #'django.core.context_processors.i18n', ], }, }, ]",
"TIME_ZONE = 'America/Fortaleza' USE_I18N = True USE_L10N = True USE_TZ = True DEFAULT_CHARSET",
"os, raven, logging from unipath import Path from decouple import config BASE_DIR =",
"settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the",
"# Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY",
"'rest_framework', 'django.contrib.auth', 'core', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Extra Apps: 'raven.contrib.django.raven_compat', ]",
"STATIC_ROOT.child('custom'), ) # BASE_DIR.parent.child('static'), STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # Raven Settings:",
"from unipath import Path from decouple import config BASE_DIR = Path(__file__).parent PROJECT_DIR =",
"https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } }",
"True MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF",
"<<EMAIL>>' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # During development only # Static files (CSS, JavaScript,",
"default=os.environ.get('SECRET_KEY'), cast=str) LOGIN_URL = '/login/' LOGOUT_URL = '/logout/' LOGIN_REDIRECT_URL = '/' PASSWORD_HASHERS =",
"'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Extra Apps: 'raven.contrib.django.raven_compat', ] IMPORT_EXPORT_USE_TRANSACTIONS = True MIDDLEWARE",
"{ 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', #Extra context processors: #'django.core.context_processors.i18n', ], },",
"{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ]",
"Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = BASE_DIR.parent.child('static') # Application definition INSTALLED_APPS",
"# Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, {",
"'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages',",
"SECRET_KEY = config('SECRET_KEY', default=os.environ.get('SECRET_KEY'), cast=str) LOGIN_URL = '/login/' LOGOUT_URL = '/logout/' LOGIN_REDIRECT_URL =",
"= ( 'django.contrib.auth.hashers.MD5PasswordHasher', ) # SECURITY WARNING: don't run with debug turned on",
"True TEMPLATE_DEBUG = DEBUG ALLOWED_HOSTS = ('*',) INTERNAL_IPS = ('127.0.0.1','192.168.0.1','localhost') # Email settings",
"'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Extra Apps: 'raven.contrib.django.raven_compat', ] IMPORT_EXPORT_USE_TRANSACTIONS = True MIDDLEWARE =",
"'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME':",
"= config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str) EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'), cast=bool) DEFAULT_FROM_EMAIL = 'StudentMy Team",
"Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),",
"'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE =",
"# Application definition INSTALLED_APPS = [ 'rest_framework', 'django.contrib.auth', 'core', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages',",
"'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'app.urls' TEMPLATES = [",
"= ( BASE_DIR.child('locale'), ) MEDIA_URL = '/media/' MEDIA_ROOT = BASE_DIR.parent.child('media') STATICFILES_DIRS = (",
"development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep",
"LOGOUT_URL = '/logout/' LOGIN_REDIRECT_URL = '/' PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.MD5PasswordHasher', ) # SECURITY",
"INSTALLED_APPS = [ 'rest_framework', 'django.contrib.auth', 'core', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Extra",
"IMPORT_EXPORT_USE_TRANSACTIONS = True MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware',",
"= 'app.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS':",
") MEDIA_URL = '/media/' MEDIA_ROOT = BASE_DIR.parent.child('media') STATICFILES_DIRS = ( BASE_DIR.parent.child('node_modules'), STATIC_ROOT.child('custom'), )",
"STATICFILES_DIRS = ( BASE_DIR.parent.child('node_modules'), STATIC_ROOT.child('custom'), ) # BASE_DIR.parent.child('static'), STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder',",
"] WSGI_APPLICATION = 'app.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': {",
"( BASE_DIR.parent.child('node_modules'), STATIC_ROOT.child('custom'), ) # BASE_DIR.parent.child('static'), STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) #",
"'/logout/' LOGIN_REDIRECT_URL = '/' PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.MD5PasswordHasher', ) # SECURITY WARNING: don't",
"#Extra context processors: #'django.core.context_processors.i18n', ], }, }, ] WSGI_APPLICATION = 'app.wsgi.application' # Database",
"import Path from decouple import config BASE_DIR = Path(__file__).parent PROJECT_DIR = BASE_DIR.parent #",
"EMAIL_HOST_USER = config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'), cast=str) EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str) EMAIL_USE_TLS = config('EMAIL_USE_TLS',",
"= [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [",
"USE_TZ = True DEFAULT_CHARSET = 'utf-8' LOCALE_PATHS = ( BASE_DIR.child('locale'), ) MEDIA_URL =",
"# During development only # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL",
"Apps: 'raven.contrib.django.raven_compat', ] IMPORT_EXPORT_USE_TRANSACTIONS = True MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware',",
"= config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'), cast=str) EMAIL_PORT = config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'), cast=int) EMAIL_HOST_USER = config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'),",
"development only # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/'",
"('*',) INTERNAL_IPS = ('127.0.0.1','192.168.0.1','localhost') # Email settings EMAIL_HOST = config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'), cast=str) EMAIL_PORT",
"https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY",
"raven, logging from unipath import Path from decouple import config BASE_DIR = Path(__file__).parent",
"default=os.environ.get('EMAIL_HOST_USER'), cast=str) EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str) EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'), cast=bool) DEFAULT_FROM_EMAIL",
"WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG",
"= [ 'rest_framework', 'django.contrib.auth', 'core', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Extra Apps:",
"'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',",
"= True DEFAULT_CHARSET = 'utf-8' LOCALE_PATHS = ( BASE_DIR.child('locale'), ) MEDIA_URL = '/media/'",
"True USE_L10N = True USE_TZ = True DEFAULT_CHARSET = 'utf-8' LOCALE_PATHS = (",
"used in production secret! SECRET_KEY = config('SECRET_KEY', default=os.environ.get('SECRET_KEY'), cast=str) LOGIN_URL = '/login/' LOGOUT_URL",
"run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = DEBUG",
"DEFAULT_CHARSET = 'utf-8' LOCALE_PATHS = ( BASE_DIR.child('locale'), ) MEDIA_URL = '/media/' MEDIA_ROOT =",
"default=os.environ.get('EMAIL_PORT'), cast=int) EMAIL_HOST_USER = config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'), cast=str) EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str) EMAIL_USE_TLS",
"LOGIN_REDIRECT_URL = '/' PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.MD5PasswordHasher', ) # SECURITY WARNING: don't run",
"{ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE",
"STATIC_ROOT = BASE_DIR.parent.child('static') # Application definition INSTALLED_APPS = [ 'rest_framework', 'django.contrib.auth', 'core', 'django.contrib.admin',",
"config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str) EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'), cast=bool) DEFAULT_FROM_EMAIL = 'StudentMy Team <<EMAIL>>'",
"SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = config('SECRET_KEY',",
"WARNING: keep the secret key used in production secret! SECRET_KEY = config('SECRET_KEY', default=os.environ.get('SECRET_KEY'),",
"= DEBUG ALLOWED_HOSTS = ('*',) INTERNAL_IPS = ('127.0.0.1','192.168.0.1','localhost') # Email settings EMAIL_HOST =",
"True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', #Extra context processors: #'django.core.context_processors.i18n',",
"https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Fortaleza' USE_I18N = True USE_L10N = True",
"only # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT",
"}, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization #",
"'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Fortaleza'",
"{ 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/",
"'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'app.urls' TEMPLATES = [ { 'BACKEND':",
"files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = BASE_DIR.parent.child('static') #",
"PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.MD5PasswordHasher', ) # SECURITY WARNING: don't run with debug turned",
"= ('127.0.0.1','192.168.0.1','localhost') # Email settings EMAIL_HOST = config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'), cast=str) EMAIL_PORT = config('EMAIL_PORT',",
"BASE_DIR.parent.child('static') # Application definition INSTALLED_APPS = [ 'rest_framework', 'django.contrib.auth', 'core', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions',",
"cast=str) EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str) EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'), cast=bool) DEFAULT_FROM_EMAIL =",
"'app.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': {",
"'django.contrib.messages.context_processors.messages', #Extra context processors: #'django.core.context_processors.i18n', ], }, }, ] WSGI_APPLICATION = 'app.wsgi.application' #",
"}, }, ] WSGI_APPLICATION = 'app.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = {",
"os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ {",
"'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE",
"LOCALE_PATHS = ( BASE_DIR.child('locale'), ) MEDIA_URL = '/media/' MEDIA_ROOT = BASE_DIR.parent.child('media') STATICFILES_DIRS =",
"= { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password",
"from decouple import config BASE_DIR = Path(__file__).parent PROJECT_DIR = BASE_DIR.parent # Quick-start development",
"{ 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation",
"'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [",
"cast=str) EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'), cast=bool) DEFAULT_FROM_EMAIL = 'StudentMy Team <<EMAIL>>' EMAIL_BACKEND =",
"Path from decouple import config BASE_DIR = Path(__file__).parent PROJECT_DIR = BASE_DIR.parent # Quick-start",
"Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Fortaleza' USE_I18N = True USE_L10N",
"[ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug',",
"# Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Fortaleza' USE_I18N = True",
"Email settings EMAIL_HOST = config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'), cast=str) EMAIL_PORT = config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'), cast=int) EMAIL_HOST_USER",
"Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = BASE_DIR.parent.child('static')",
"Application definition INSTALLED_APPS = [ 'rest_framework', 'django.contrib.auth', 'core', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles',",
"MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF =",
"# https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), }",
"DEBUG ALLOWED_HOSTS = ('*',) INTERNAL_IPS = ('127.0.0.1','192.168.0.1','localhost') # Email settings EMAIL_HOST = config('EMAIL_HOST',",
"# https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Fortaleza' USE_I18N = True USE_L10N =",
"production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in",
"STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # Raven Settings: RAVEN_CONFIG = { 'dsn':",
"for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used",
"Path(__file__).parent PROJECT_DIR = BASE_DIR.parent # Quick-start development settings - unsuitable for production #",
"[], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', #Extra context",
"on in production! DEBUG = True TEMPLATE_DEBUG = DEBUG ALLOWED_HOSTS = ('*',) INTERNAL_IPS",
"# Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT =",
"BASE_DIR.parent.child('media') STATICFILES_DIRS = ( BASE_DIR.parent.child('node_modules'), STATIC_ROOT.child('custom'), ) # BASE_DIR.parent.child('static'), STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder',",
"debug turned on in production! DEBUG = True TEMPLATE_DEBUG = DEBUG ALLOWED_HOSTS =",
"#'django.core.context_processors.i18n', ], }, }, ] WSGI_APPLICATION = 'app.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES",
"During development only # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL =",
"'django.contrib.staticfiles', # Extra Apps: 'raven.contrib.django.raven_compat', ] IMPORT_EXPORT_USE_TRANSACTIONS = True MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware',",
"in production secret! SECRET_KEY = config('SECRET_KEY', default=os.environ.get('SECRET_KEY'), cast=str) LOGIN_URL = '/login/' LOGOUT_URL =",
"'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', #Extra context processors: #'django.core.context_processors.i18n', ], }, }, ] WSGI_APPLICATION",
"= True MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ]",
"'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'app.urls' TEMPLATES = [ {",
"BASE_DIR = Path(__file__).parent PROJECT_DIR = BASE_DIR.parent # Quick-start development settings - unsuitable for",
"validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',",
"'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', #Extra context processors:",
"}, ] WSGI_APPLICATION = 'app.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default':",
"'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE =",
"'StudentMy Team <<EMAIL>>' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # During development only # Static files",
"unipath import Path from decouple import config BASE_DIR = Path(__file__).parent PROJECT_DIR = BASE_DIR.parent",
"= 'en-us' TIME_ZONE = 'America/Fortaleza' USE_I18N = True USE_L10N = True USE_TZ =",
"'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # Raven Settings: RAVEN_CONFIG = { 'dsn': config('RAVEN_DSN', default=os.environ.get('RAVEN_DSN'), cast=str),",
"] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Fortaleza' USE_I18N =",
"context processors: #'django.core.context_processors.i18n', ], }, }, ] WSGI_APPLICATION = 'app.wsgi.application' # Database #",
"Team <<EMAIL>>' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # During development only # Static files (CSS,",
"'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'app.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [],",
"'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS",
"'utf-8' LOCALE_PATHS = ( BASE_DIR.child('locale'), ) MEDIA_URL = '/media/' MEDIA_ROOT = BASE_DIR.parent.child('media') STATICFILES_DIRS",
"config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'), cast=int) EMAIL_HOST_USER = config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'), cast=str) EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str)",
"logging from unipath import Path from decouple import config BASE_DIR = Path(__file__).parent PROJECT_DIR",
"= ('*',) INTERNAL_IPS = ('127.0.0.1','192.168.0.1','localhost') # Email settings EMAIL_HOST = config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'), cast=str)",
"# BASE_DIR.parent.child('static'), STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # Raven Settings: RAVEN_CONFIG =",
"EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str) EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'), cast=bool) DEFAULT_FROM_EMAIL = 'StudentMy",
"'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization",
"}, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us'",
"import os, raven, logging from unipath import Path from decouple import config BASE_DIR",
"'app.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME':",
"USE_I18N = True USE_L10N = True USE_TZ = True DEFAULT_CHARSET = 'utf-8' LOCALE_PATHS",
"BASE_DIR.parent.child('node_modules'), STATIC_ROOT.child('custom'), ) # BASE_DIR.parent.child('static'), STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # Raven",
"LOGIN_URL = '/login/' LOGOUT_URL = '/logout/' LOGIN_REDIRECT_URL = '/' PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.MD5PasswordHasher',",
") # SECURITY WARNING: don't run with debug turned on in production! DEBUG",
"= [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'app.urls'",
"TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors':",
"the secret key used in production secret! SECRET_KEY = config('SECRET_KEY', default=os.environ.get('SECRET_KEY'), cast=str) LOGIN_URL",
"DEBUG = True TEMPLATE_DEBUG = DEBUG ALLOWED_HOSTS = ('*',) INTERNAL_IPS = ('127.0.0.1','192.168.0.1','localhost') #",
") # BASE_DIR.parent.child('static'), STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # Raven Settings: RAVEN_CONFIG",
"config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'), cast=str) EMAIL_PORT = config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'), cast=int) EMAIL_HOST_USER = config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'), cast=str)",
"# Extra Apps: 'raven.contrib.django.raven_compat', ] IMPORT_EXPORT_USE_TRANSACTIONS = True MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware',",
"= '/logout/' LOGIN_REDIRECT_URL = '/' PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.MD5PasswordHasher', ) # SECURITY WARNING:",
"'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', #Extra",
"TEMPLATE_DEBUG = DEBUG ALLOWED_HOSTS = ('*',) INTERNAL_IPS = ('127.0.0.1','192.168.0.1','localhost') # Email settings EMAIL_HOST",
"Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME':",
"settings EMAIL_HOST = config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'), cast=str) EMAIL_PORT = config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'), cast=int) EMAIL_HOST_USER =",
"'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS =",
"SECURITY WARNING: don't run with debug turned on in production! DEBUG = True",
"STATIC_URL = '/static/' STATIC_ROOT = BASE_DIR.parent.child('static') # Application definition INSTALLED_APPS = [ 'rest_framework',",
"'django.core.mail.backends.console.EmailBackend' # During development only # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/",
"# SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY =",
"See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret!",
"'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation #",
"{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request',",
"{ 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators",
"config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'), cast=bool) DEFAULT_FROM_EMAIL = 'StudentMy Team <<EMAIL>>' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # During",
"ALLOWED_HOSTS = ('*',) INTERNAL_IPS = ('127.0.0.1','192.168.0.1','localhost') # Email settings EMAIL_HOST = config('EMAIL_HOST', default=os.environ.get('EMAIL_HOST'),",
"AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME':",
"don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG =",
"( BASE_DIR.child('locale'), ) MEDIA_URL = '/media/' MEDIA_ROOT = BASE_DIR.parent.child('media') STATICFILES_DIRS = ( BASE_DIR.parent.child('node_modules'),",
"}, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Fortaleza' USE_I18N",
"= '/media/' MEDIA_ROOT = BASE_DIR.parent.child('media') STATICFILES_DIRS = ( BASE_DIR.parent.child('node_modules'), STATIC_ROOT.child('custom'), ) # BASE_DIR.parent.child('static'),",
"'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', #Extra context processors: #'django.core.context_processors.i18n', ], }, }, ] WSGI_APPLICATION = 'app.wsgi.application'",
"= ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # Raven Settings: RAVEN_CONFIG = { 'dsn': config('RAVEN_DSN',",
"'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # Raven Settings: RAVEN_CONFIG = { 'dsn': config('RAVEN_DSN', default=os.environ.get('RAVEN_DSN'), cast=str), #'release':",
"'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME':",
"EMAIL_PORT = config('EMAIL_PORT', default=os.environ.get('EMAIL_PORT'), cast=int) EMAIL_HOST_USER = config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'), cast=str) EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD',",
"EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # During development only # Static files (CSS, JavaScript, Images)",
"= config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'), cast=bool) DEFAULT_FROM_EMAIL = 'StudentMy Team <<EMAIL>>' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' #",
"# Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR,",
"[ 'rest_framework', 'django.contrib.auth', 'core', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Extra Apps: 'raven.contrib.django.raven_compat',",
"EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'), cast=bool) DEFAULT_FROM_EMAIL = 'StudentMy Team <<EMAIL>>' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'",
"BASE_DIR.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ #",
"(CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = BASE_DIR.parent.child('static') # Application",
"'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'app.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS':",
"processors: #'django.core.context_processors.i18n', ], }, }, ] WSGI_APPLICATION = 'app.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases",
"'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth',",
"MEDIA_URL = '/media/' MEDIA_ROOT = BASE_DIR.parent.child('media') STATICFILES_DIRS = ( BASE_DIR.parent.child('node_modules'), STATIC_ROOT.child('custom'), ) #",
"unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key",
"'/' PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.MD5PasswordHasher', ) # SECURITY WARNING: don't run with debug",
"in production! DEBUG = True TEMPLATE_DEBUG = DEBUG ALLOWED_HOSTS = ('*',) INTERNAL_IPS =",
"= BASE_DIR.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/",
"secret key used in production secret! SECRET_KEY = config('SECRET_KEY', default=os.environ.get('SECRET_KEY'), cast=str) LOGIN_URL =",
"= True TEMPLATE_DEBUG = DEBUG ALLOWED_HOSTS = ('*',) INTERNAL_IPS = ('127.0.0.1','192.168.0.1','localhost') # Email",
"DEFAULT_FROM_EMAIL = 'StudentMy Team <<EMAIL>>' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # During development only #",
"= 'django.core.mail.backends.console.EmailBackend' # During development only # Static files (CSS, JavaScript, Images) #",
"= config('EMAIL_HOST_USER', default=os.environ.get('EMAIL_HOST_USER'), cast=str) EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=os.environ.get('EMAIL_HOST_PASSWORD'), cast=str) EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=os.environ.get('EMAIL_USE_TLS'),",
"= config('SECRET_KEY', default=os.environ.get('SECRET_KEY'), cast=str) LOGIN_URL = '/login/' LOGOUT_URL = '/logout/' LOGIN_REDIRECT_URL = '/'",
"decouple import config BASE_DIR = Path(__file__).parent PROJECT_DIR = BASE_DIR.parent # Quick-start development settings"
] |
[
"fixed-size window # REVIEW I could refactor handling the boarder into pad_image(). then",
"enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)): ... tot += ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot) # weak",
"to matlab's im2col class IterateOverWindows(object): def __init__(self, pixels_per_cell, pixel_stride=None, image=None, mode='constant', cval=0, start_pt=(0,",
"__init__(self, pixels_per_cell, pixel_stride=None, image=None, mode='constant', cval=0, start_pt=(0, 0), stop_pt=(None, None)): ''' Sliding window",
"= max(0, bbox.min_x), min(ncols, bbox.max_x) min_y, max_y = max(0, bbox.min_y), min(nrows, bbox.max_y) #print('c=%d'%c,",
"y def __iter__(self): '''iterate over fields tuple/list style''' for field_name in self.__slots__: yield",
"the chip BoundingBox bbox : the inclusive extents of the chip (which may",
"= min_x+self.pixels_per_cell[0] min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min_y+self.pixels_per_cell[1] bbox",
"self.image = image self.mode = mode self.cval = cval self.start_pt = Point2D(*(int(s) for",
"= np.atleast_2d( # bottom border np.flipud(chip)[:nrows_chunk-max_y, :, ...]) chunk[min_y:max_y, max_x:, ...] = np.atleast_2d(",
"np.arange(81).reshape((9,9)).T >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)): ... tot += ret[0].sum() ... #print(i, ':\\n',",
"generic mutable namedtuple class Point2D(object): __slots__ = \"x\", \"y\" def __init__(self, x, y):",
"optional like numpy.array (ndim == 2 or 3) ''' def setImage(self, image): '''",
"of an image # # this is similar to matlab's im2col class IterateOverWindows(object):",
"= 0; im = np.arange(81).reshape((9,9)).T >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)): ... tot +=",
"filled according to the given mode. numpy.array mask : the binary mask of",
"share the same iter() interface # TODO create IterateOverOverlappingWindows(IterateOverWindows), which enforces # pixel_stride",
"within the current window. Points outside the boundaries of the input are filled",
"ncols - min_x) min_x = max(0, -min_x) min_y = self.start_pt.y + self.pixel_stride[1]*r -",
"= np.fliplr(np.atleast_2d( # bottom-left corner np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...])) elif self.mode == 'discard': mask",
"min_x+self.pixels_per_cell[0] min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min_y+self.pixels_per_cell[1] bbox =",
"numpy.array, optional chip : pixels within the current window. Points outside the boundaries",
":, ...])) chunk[min_y:max_y, :min_x, ...] = np.fliplr(np.atleast_2d( # left border chip[:, :min_x, ...]))",
"pixels_per_cell, pixel_stride=None, image=None, mode='constant', cval=0, start_pt=(0, 0), stop_pt=(None, None)): ''' Sliding window iterator.",
"- pixels_per_half_cell[1] max_y = min_y+self.pixels_per_cell[1] bbox = BoundingBox(min_x,max_x,min_y,max_y) min_x, max_x = max(0, bbox.min_x),",
"''' Parameters ---------- segmented : array_like Superpixel labeled segmentation (like numpy.array) NOTE regionprops",
"out pixels in this sliding window min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0]",
"= self.image[min_y:max_y, min_x:max_x, ...] # couch chip in a fixed-size window # REVIEW",
"right/bottom boarder of an image # # this is similar to matlab's im2col",
"Only ``mode='constant'``, ``mode='discard'`` and ``mode='reflect'`` are currently supported, although others could be added",
"def setImage(self, image): ''' Parameters ---------- image : array_like like numpy.array (ndim ==",
"in self.__slots__: yield getattr(self, field_name) def __getitem__(self, index): '''tuple/list style getitem''' return getattr(self,",
"simplifies indexing) chunk[max_y:, min_x:max_x, ...] = np.atleast_2d( # bottom border np.flipud(chip)[:nrows_chunk-max_y, :, ...])",
"input can be filled from chip. # this seems harder than it should",
"2 == 1, \\ 'provide an odd number for pixels_per_cell to easily center",
"''' assert pixels_per_cell[0] % 2 == 1 and pixels_per_cell[1] % 2 == 1,",
"labels to be sequential and start at 1: {1,2,...}. label 0 is treated",
"= image return self def iter(self, image=None): '''Next superpixel generator Parameters ---------- image",
"regionprops() treats label zero (0) as unlabeled and ignores it # TODO remove",
"iter(self, image=None): '''Next superpixel generator Parameters ---------- image : array_like, optional like numpy.array",
"else: assert False, 'unrecognized mode' # FIXME should bbox be max-1 like in",
"+= ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot) # weak test 22647 >>>",
"more efficient though if self.mode == 'constant' or self.mode == 'reflect': chunk =",
"in properties: if rp._slice is None: continue (min_y,min_x,max_y,max_x) = rp.bbox chip = image[min_y:max_y,",
"and ``mode='reflect'`` are currently supported, although others could be added (e.g., 'nearest' and",
"harder than it should be... chunk[:min_y, min_x:max_x, ...] = np.flipud(np.atleast_2d( # top border",
"''' Sliding window iterator. Parameters ---------- pixels_per_cell : array_like x,y - let x,y",
"stop_pt : array_like, optional (x,y) >>> tot = 0; im = np.arange(100).reshape((10,10)) >>>",
">>> tot = 0; im = np.arange(81).reshape((9,9)).T >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)): ...",
"s in start_pt)) self.stop_pt = Point2D(*(stop_pt)) def setImage(self, image): ''' Parameters ---------- image",
"'reflect': nrows_chunk, ncols_chunk = chunk.shape[0:2] # NOTE assume the points outside the boundaries",
"could refactor handling the boarder into pad_image(). then mode wouldn't # be necessary",
"optional chip : pixels within the current window. Points outside the boundaries of",
"(min_y,min_x,max_y,max_x) = rp.bbox chip = image[min_y:max_y, min_x:max_x,...] mask = rp.filled_image bbox = BoundingBox(min_x,max_x-1,min_y,max_y-1)",
"... #print(i, ':\\n', ret[0]) >>> print(tot) # weak test 25000 ''' assert pixels_per_cell[0]",
"2 or 3) Returns ------- numpy.array, optional chip : pixels within the current",
"outside the boundaries of the input if ``mode='constant'``. Default is 0.0 start_pt :",
"#import ipdb # could maybe turn this into a generic mutable namedtuple class",
"im = np.arange(100).reshape((10,10)) >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)): ... tot += ret[0].sum() ...",
"it # TODO remove small, unconnected components properties = skimage.measure.regionprops(self.segmented) BoundingBox = namedtuple(\"BoundingBox\",",
"optional like numpy.array (ndim == 2 or 3) mode : str, optional Points",
"min(self.pixels_per_cell[0], ncols - min_x) min_x = max(0, -min_x) min_y = self.start_pt.y + self.pixel_stride[1]*r",
"image=None): '''Next superpixel generator Parameters ---------- image : array_like, optional like numpy.array (ndim",
"getitem''' return getattr(self, self.__slots__[index]) # NOTE IterateOverWindows and IterateOverSuperpixels must share the same",
"...])) chunk[max_y:, max_x:, ...] = np.atleast_2d( # bottom-right corner np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...]) chunk[max_y:,",
"== 2 or 3) mode : str, optional Points outside the boundaries of",
"here and I could simply loop over the image. # RE this is",
"regionprops expects labels to be sequential and start at 1: {1,2,...}. label 0",
"boundaries of the input are filled according to the given mode. Only ``mode='constant'``,",
"be of type NoneType\") # regionprops() treats label zero (0) as unlabeled and",
"... tot += ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot) # weak test",
"this is similar to matlab's im2col class IterateOverWindows(object): def __init__(self, pixels_per_cell, pixel_stride=None, image=None,",
"= cval self.start_pt = Point2D(*(int(s) for s in start_pt)) self.stop_pt = Point2D(*(stop_pt)) def",
"# couch chip in a fixed-size window # REVIEW I could refactor handling",
"image is not None: self.image = image elif self.image is None: raise TypeError(\"self.image",
"np.atleast_2d( # bottom-right corner np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...]) chunk[max_y:, :min_x, ...] = np.fliplr(np.atleast_2d( #",
"tot += ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot) # weak test 22647",
"right border np.fliplr(chip)[:, :ncols_chunk-max_x, ...]) chunk[:min_y, :min_x, ...] = np.fliplr(np.flipud(np.atleast_2d( # top-left corner",
"stop_y = nrows if self.stop_pt.y is None else int(self.stop_pt.y) roi_height = stop_y-self.start_pt.y roi_width",
"3) ''' def setImage(self, image): ''' Parameters ---------- image : array_like like numpy.array",
"corner np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...])) chunk[max_y:, max_x:, ...] = np.atleast_2d( # bottom-right corner np.flipud(np.fliplr(chip))[:nrows_chunk-max_y,",
"pixels_per_cell : array_like x,y - let x,y be odd so the window can",
"mask = np.zeros(self.pixels_per_cell) min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min(self.pixels_per_cell[0],",
"optional (x,y) stop_pt : array_like, optional (x,y) >>> tot = 0; im =",
"None: self.image = image elif self.image is None: raise TypeError(\"self.image cannot be of",
"mode : str, optional Points outside the boundaries of the input are filled",
": array_like Superpixel labeled segmentation (like numpy.array) NOTE regionprops expects labels to be",
"min_y max_y\") pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2 ystrides_per_image, xstrides_per_image = self.shape() # iterate around",
"0; im = np.arange(81).reshape((9,9)).T >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)): ... tot += ret[0].sum()",
"the boundaries of the input are filled according to the given mode. numpy.array",
"the window within the chip BoundingBox bbox : the inclusive extents of the",
"turned into a class ''' if image is not None: self.image = image",
"image=None): self.segmented = segmented self.image = image ''' Parameters ---------- segmented : array_like",
"2 or 3) ''' self.image = image return self def shape(self): if self.image",
"mode. numpy.array mask : the binary mask of the window within the chip",
"is more efficient though if self.mode == 'constant' or self.mode == 'reflect': chunk",
"boarder of the image for r in xrange(ystrides_per_image): for c in xrange(xstrides_per_image): #",
"x,y image : array_like, optional like numpy.array (ndim == 2 or 3) mode",
"3) ''' self.image = image return self def shape(self): if self.image is None:",
"components properties = skimage.measure.regionprops(self.segmented) BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") for rp",
"chip (which may exceed the bounds of the image) MODIFICATIONS sgr : turned",
"is None: raise TypeError(\"self.image cannot be of type NoneType\") nrows, ncols = self.image.shape[0:2]",
"2 or 3) ''' self.image = image return self def iter(self, image=None): '''Next",
"# NOTE if pixel_stride > pixels_per_cell/2, it is possible to leave data unseen",
"# NOTE neg indice trikery (flipping first simplifies indexing) chunk[max_y:, min_x:max_x, ...] =",
"indexing) chunk[max_y:, min_x:max_x, ...] = np.atleast_2d( # bottom border np.flipud(chip)[:nrows_chunk-max_y, :, ...]) chunk[min_y:max_y,",
"are filled according to the given mode. Only ``mode='constant'``, ``mode='discard'`` and ``mode='reflect'`` are",
"= np.flipud(np.atleast_2d( # top border chip[:min_y, :, ...])) chunk[min_y:max_y, :min_x, ...] = np.fliplr(np.atleast_2d(",
"return (nrows, ncols) def iter(self,image=None): '''Next window generator Parameters ---------- image : array_like",
"(nrows, ncols) def iter(self,image=None): '''Next window generator Parameters ---------- image : array_like like",
":min_x, ...]))) chunk[:min_y, max_x:, ...] = np.flipud(np.atleast_2d( # top-right corner np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...]))",
"start_pt)) self.stop_pt = Point2D(*(stop_pt)) def setImage(self, image): ''' Parameters ---------- image : array_like",
"chip : pixels within the current window. Points outside the boundaries of the",
"else pixel_stride self.image = image self.mode = mode self.cval = cval self.start_pt =",
"be necessary here and I could simply loop over the image. # RE",
"mask of the window within the chip BoundingBox bbox : the inclusive extents",
"max(0, bbox.min_x), min(ncols, bbox.max_x) min_y, max_y = max(0, bbox.min_y), min(nrows, bbox.max_y) #print('c=%d'%c, 'r=%d'%r,",
"couch chip in a fixed-size window # REVIEW I could refactor handling the",
"yield chunk, mask, bbox class IterateOverSuperpixels(object): def __init__(self, segmented, image=None): self.segmented = segmented",
"chunk[max_y:, min_x:max_x, ...] = np.atleast_2d( # bottom border np.flipud(chip)[:nrows_chunk-max_y, :, ...]) chunk[min_y:max_y, max_x:,",
"a generic mutable namedtuple class Point2D(object): __slots__ = \"x\", \"y\" def __init__(self, x,",
"the same iter() interface # TODO create IterateOverOverlappingWindows(IterateOverWindows), which enforces # pixel_stride <=",
"# pixel_stride <= pixels_per_cell # # NOTE if pixel_stride > pixels_per_cell/2, it is",
"iter(self,image=None): '''Next window generator Parameters ---------- image : array_like like numpy.array (ndim ==",
"input are filled according to the given mode. numpy.array mask : the binary",
"(ndim == 2 or 3) Returns ------- numpy.array, optional chip : pixels within",
"may exceed the bounds of the image) MODIFICATIONS sgr : turned into a",
"# NOTE could iterate over the interior of the image without bounds checking",
"max_x min_y max_y\") pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2 ystrides_per_image, xstrides_per_image = self.shape() # iterate",
"= max(0, -min_x) min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min(self.pixels_per_cell[1],",
"numpy.array (ndim == 2 or 3) Returns ------- numpy.array, optional chip : pixels",
"import namedtuple import skimage.measure #import matplotlib.pyplot as plt #import ipdb # could maybe",
"image) MODIFICATIONS sgr : optimized sgr : turned into a class ''' if",
"optimized sgr : turned into a class ''' if image is not None:",
": array_like, optional x,y image : array_like, optional like numpy.array (ndim == 2",
"1 and pixels_per_cell[1] % 2 == 1, \\ 'provide an odd number for",
"of the chip (which may exceed the bounds of the image) MODIFICATIONS sgr",
"pixels_per_half_cell[0] max_x = min(self.pixels_per_cell[0], ncols - min_x) min_x = max(0, -min_x) min_y =",
"chunk[min_y:max_y, :min_x, ...] = np.fliplr(np.atleast_2d( # left border chip[:, :min_x, ...])) # NOTE",
"2 or 3) ''' def setImage(self, image): ''' Parameters ---------- image : array_like",
"= stop_y-self.start_pt.y roi_width = stop_x-self.start_pt.x #print(roi_width, roi_height, self.pixel_stride) nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int) ncols =",
"+ self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min_y+self.pixels_per_cell[1] bbox = BoundingBox(min_x,max_x,min_y,max_y) min_x, max_x =",
"max_y) #print() chunk[min_y:max_y, min_x:max_x, ...] = chip mask[min_y:max_y, min_x:max_x] = 1 if self.mode",
":ncols_chunk-max_x, ...])) chunk[max_y:, max_x:, ...] = np.atleast_2d( # bottom-right corner np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...])",
"mode='reflect' ''' if image is not None: self.image = image elif self.image is",
": array_like, optional (x,y) >>> tot = 0; im = np.arange(100).reshape((10,10)) >>> for",
"= self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min_y+self.pixels_per_cell[1] bbox = BoundingBox(min_x,max_x,min_y,max_y) min_x,",
"---------- segmented : array_like Superpixel labeled segmentation (like numpy.array) NOTE regionprops expects labels",
"at 1: {1,2,...}. label 0 is treated as unlabeled. image : array_like, optional",
"be of type NoneType\") nrows, ncols = self.image.shape[0:2] stop_x = ncols if self.stop_pt.x",
"(which may exceed the bounds of the image) MODIFICATIONS sgr : optimized sgr",
"% 2 == 1 and pixels_per_cell[1] % 2 == 1, \\ 'provide an",
"getattr(self, self.__slots__[index]) # NOTE IterateOverWindows and IterateOverSuperpixels must share the same iter() interface",
"array_like, optional (x,y) stop_pt : array_like, optional (x,y) >>> tot = 0; im",
"np from collections import namedtuple import skimage.measure #import matplotlib.pyplot as plt #import ipdb",
"like numpy.array (ndim == 2 or 3) ''' def setImage(self, image): ''' Parameters",
">>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)): ... tot += ret[0].sum() ... #print(i, ':\\n', ret[0])",
"field_name in self.__slots__: yield getattr(self, field_name) def __getitem__(self, index): '''tuple/list style getitem''' return",
"image return self def shape(self): if self.image is None: raise TypeError(\"self.image cannot be",
"segmented : array_like Superpixel labeled segmentation (like numpy.array) NOTE regionprops expects labels to",
"is 0.0 start_pt : array_like, optional (x,y) stop_pt : array_like, optional (x,y) >>>",
"boundaries of the input if ``mode='constant'``. Default is 0.0 start_pt : array_like, optional",
"of the window within the chip BoundingBox bbox : the inclusive extents of",
"> pixels_per_cell/2, it is possible to leave data unseen on the # right/bottom",
"self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2 ystrides_per_image, xstrides_per_image = self.shape() # iterate around the boarder of the",
"3) Returns ------- numpy.array, optional chip : pixels within the current window. Points",
"NOTE regionprops expects labels to be sequential and start at 1: {1,2,...}. label",
"could maybe turn this into a generic mutable namedtuple class Point2D(object): __slots__ =",
"# RE this is more efficient though if self.mode == 'constant' or self.mode",
"pixel_stride <= pixels_per_cell # # NOTE if pixel_stride > pixels_per_cell/2, it is possible",
"np.flipud(np.atleast_2d( # top border chip[:min_y, :, ...])) chunk[min_y:max_y, :min_x, ...] = np.fliplr(np.atleast_2d( #",
"be sequential and start at 1: {1,2,...}. label 0 is treated as unlabeled.",
"'wrap') cval : float, optional Value used for points outside the boundaries of",
"tuple/list style''' for field_name in self.__slots__: yield getattr(self, field_name) def __getitem__(self, index): '''tuple/list",
"image without bounds checking # for additional speedup BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x",
"field_name) def __getitem__(self, index): '''tuple/list style getitem''' return getattr(self, self.__slots__[index]) # NOTE IterateOverWindows",
"label 0 is treated as unlabeled. image : array_like, optional like numpy.array (ndim",
"roi_height = stop_y-self.start_pt.y roi_width = stop_x-self.start_pt.x #print(roi_width, roi_height, self.pixel_stride) nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int) ncols",
"in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)): ... tot += ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot) #",
"1: {1,2,...}. label 0 is treated as unlabeled. image : array_like, optional like",
"is None: raise TypeError(\"self.image cannot be of type NoneType\") # regionprops() treats label",
"import numpy as np from collections import namedtuple import skimage.measure #import matplotlib.pyplot as",
"like numpy.array (ndim == 2 or 3) mode : str, optional Points outside",
"window iterator. Parameters ---------- pixels_per_cell : array_like x,y - let x,y be odd",
"collections import namedtuple import skimage.measure #import matplotlib.pyplot as plt #import ipdb # could",
"fields tuple/list style''' for field_name in self.__slots__: yield getattr(self, field_name) def __getitem__(self, index):",
"input are filled according to the given mode. Only ``mode='constant'``, ``mode='discard'`` and ``mode='reflect'``",
"matlab's im2col class IterateOverWindows(object): def __init__(self, pixels_per_cell, pixel_stride=None, image=None, mode='constant', cval=0, start_pt=(0, 0),",
"enforces # pixel_stride <= pixels_per_cell # # NOTE if pixel_stride > pixels_per_cell/2, it",
"test 25000 ''' assert pixels_per_cell[0] % 2 == 1 and pixels_per_cell[1] % 2",
"pixels_per_cell to easily center the window' self.pixels_per_cell = tuple(pixels_per_cell) self.pixel_stride = self.pixels_per_cell if",
"then mode wouldn't # be necessary here and I could simply loop over",
"NOTE IterateOverWindows and IterateOverSuperpixels must share the same iter() interface # TODO create",
"nrows_chunk, ncols_chunk = chunk.shape[0:2] # NOTE assume the points outside the boundaries of",
":ncols_chunk-max_x, ...]) chunk[:min_y, :min_x, ...] = np.fliplr(np.flipud(np.atleast_2d( # top-left corner chip[:min_y, :min_x, ...])))",
"self.mode == 'reflect': nrows_chunk, ncols_chunk = chunk.shape[0:2] # NOTE assume the points outside",
"chunk = chip else: assert False, 'unrecognized mode' # FIXME should bbox be",
"self.mode == 'discard': mask = np.ones_like(chip) chunk = chip else: assert False, 'unrecognized",
"bbox class IterateOverSuperpixels(object): def __init__(self, segmented, image=None): self.segmented = segmented self.image = image",
"ncols = self.image.shape[0:2] # NOTE could iterate over the interior of the image",
"corner np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...]) chunk[max_y:, :min_x, ...] = np.fliplr(np.atleast_2d( # bottom-left corner np.flipud(chip)[:nrows_chunk-max_y,",
"this into a generic mutable namedtuple class Point2D(object): __slots__ = \"x\", \"y\" def",
"image # # this is similar to matlab's im2col class IterateOverWindows(object): def __init__(self,",
"pixel_stride self.image = image self.mode = mode self.cval = cval self.start_pt = Point2D(*(int(s)",
"chip in a fixed-size window # REVIEW I could refactor handling the boarder",
"- min_x) min_x = max(0, -min_x) min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1]",
"bottom-left corner np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...])) elif self.mode == 'discard': mask = np.ones_like(chip) chunk",
"optional (x,y) >>> tot = 0; im = np.arange(100).reshape((10,10)) >>> for i,ret in",
"= self.image.shape[0:2] stop_x = ncols if self.stop_pt.x is None else int(self.stop_pt.x) stop_y =",
">>> print(tot) # weak test 22647 >>> tot = 0; im = np.arange(81).reshape((9,9)).T",
"== 1 and pixels_per_cell[1] % 2 == 1, \\ 'provide an odd number",
"expects labels to be sequential and start at 1: {1,2,...}. label 0 is",
"the boundaries of the input if ``mode='constant'``. Default is 0.0 start_pt : array_like,",
"without bounds checking # for additional speedup BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y",
"x,y - let x,y be odd so the window can be easily centered",
"iterator. Parameters ---------- pixels_per_cell : array_like x,y - let x,y be odd so",
"for field_name in self.__slots__: yield getattr(self, field_name) def __getitem__(self, index): '''tuple/list style getitem'''",
">>> tot = 0; im = np.arange(100).reshape((10,10)) >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)): ...",
"which enforces # pixel_stride <= pixels_per_cell # # NOTE if pixel_stride > pixels_per_cell/2,",
"np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...])) chunk[max_y:, max_x:, ...] = np.atleast_2d( # bottom-right corner np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x,",
"NOTE neg indice trikery (flipping first simplifies indexing) chunk[max_y:, min_x:max_x, ...] = np.atleast_2d(",
"...] = np.atleast_2d( # bottom-right corner np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...]) chunk[max_y:, :min_x, ...] =",
"np.atleast_2d( # bottom border np.flipud(chip)[:nrows_chunk-max_y, :, ...]) chunk[min_y:max_y, max_x:, ...] = np.atleast_2d( #",
"i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)): ... tot += ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot)",
"an image # # this is similar to matlab's im2col class IterateOverWindows(object): def",
"on the # right/bottom boarder of an image # # this is similar",
"nrows - min_y) min_y = max(0, -min_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y)",
"chip = self.image[min_y:max_y, min_x:max_x, ...] # couch chip in a fixed-size window #",
"<= pixels_per_cell # # NOTE if pixel_stride > pixels_per_cell/2, it is possible to",
"...] = np.fliplr(np.flipud(np.atleast_2d( # top-left corner chip[:min_y, :min_x, ...]))) chunk[:min_y, max_x:, ...] =",
"np.fliplr(np.atleast_2d( # left border chip[:, :min_x, ...])) # NOTE neg indice trikery (flipping",
"self.stop_pt.y is None else int(self.stop_pt.y) roi_height = stop_y-self.start_pt.y roi_width = stop_x-self.start_pt.x #print(roi_width, roi_height,",
"self.cval mask = np.zeros(self.pixels_per_cell) min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x =",
"array_like, optional like numpy.array (ndim == 2 or 3) ''' def setImage(self, image):",
"sgr : optimized sgr : turned into a class ''' if image is",
"np.flipud(np.atleast_2d( # top-right corner np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...])) chunk[max_y:, max_x:, ...] = np.atleast_2d( #",
"''' self.image = image return self def iter(self, image=None): '''Next superpixel generator Parameters",
"optional like numpy.array (ndim == 2 or 3) Returns ------- numpy.array, optional chip",
"image elif self.image is None: raise TypeError(\"self.image cannot be of type NoneType\") nrows,",
"roi_height, self.pixel_stride) nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int) ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int) return (nrows, ncols) def iter(self,image=None):",
"or self.mode == 'reflect': chunk = np.empty( self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim ==",
"start at 1: {1,2,...}. label 0 is treated as unlabeled. image : array_like,",
"ipdb # could maybe turn this into a generic mutable namedtuple class Point2D(object):",
"__init__(self, x, y): self.x = x self.y = y def __iter__(self): '''iterate over",
"Sliding window iterator. Parameters ---------- pixels_per_cell : array_like x,y - let x,y be",
"str, optional Points outside the boundaries of the input are filled according to",
"array_like like numpy.array (ndim == 2 or 3) ''' self.image = image return",
"tot = 0; im = np.arange(100).reshape((10,10)) >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)): ... tot",
"self.image.shape[0:2] stop_x = ncols if self.stop_pt.x is None else int(self.stop_pt.x) stop_y = nrows",
"neg indice trikery (flipping first simplifies indexing) chunk[max_y:, min_x:max_x, ...] = np.atleast_2d( #",
"== 2 or 3) ''' self.image = image return self def shape(self): if",
"be of type NoneType\") nrows, ncols = self.image.shape[0:2] # NOTE could iterate over",
"min_x:max_x, ...] = np.atleast_2d( # bottom border np.flipud(chip)[:nrows_chunk-max_y, :, ...]) chunk[min_y:max_y, max_x:, ...]",
"points outside the boundaries of the input if ``mode='constant'``. Default is 0.0 start_pt",
"# iterate around the boarder of the image for r in xrange(ystrides_per_image): for",
"pixels within the current window. Points outside the boundaries of the input are",
"return self def shape(self): if self.image is None: raise TypeError(\"self.image cannot be of",
"= segmented self.image = image ''' Parameters ---------- segmented : array_like Superpixel labeled",
"could iterate over the interior of the image without bounds checking # for",
"... #print(i, ':\\n', ret[0]) >>> print(tot) # weak test 22647 >>> tot =",
"min_x:max_x] = 1 if self.mode == 'reflect': nrows_chunk, ncols_chunk = chunk.shape[0:2] # NOTE",
"'discard': mask = np.ones_like(chip) chunk = chip else: assert False, 'unrecognized mode' #",
"data unseen on the # right/bottom boarder of an image # # this",
"is None else pixel_stride self.image = image self.mode = mode self.cval = cval",
"else int(self.stop_pt.x) stop_y = nrows if self.stop_pt.y is None else int(self.stop_pt.y) roi_height =",
"max_y = max(0, bbox.min_y), min(nrows, bbox.max_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) chip",
"chip (which may exceed the bounds of the image) MODIFICATIONS sgr : optimized",
"if self.stop_pt.x is None else int(self.stop_pt.x) stop_y = nrows if self.stop_pt.y is None",
"self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min_x+self.pixels_per_cell[0] min_y = self.start_pt.y + self.pixel_stride[1]*r",
"float, optional Value used for points outside the boundaries of the input if",
"int(self.stop_pt.x) stop_y = nrows if self.stop_pt.y is None else int(self.stop_pt.y) roi_height = stop_y-self.start_pt.y",
"current window. Points outside the boundaries of the input are filled according to",
"max(0, bbox.min_y), min(nrows, bbox.max_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) chip = self.image[min_y:max_y,",
"mode='constant', cval=0, start_pt=(0, 0), stop_pt=(None, None)): ''' Sliding window iterator. Parameters ---------- pixels_per_cell",
"mask = np.ones_like(chip) chunk = chip else: assert False, 'unrecognized mode' # FIXME",
"the inclusive extents of the chip (which may exceed the bounds of the",
"r in xrange(ystrides_per_image): for c in xrange(xstrides_per_image): # chip out pixels in this",
"outside the boundaries of input can be filled from chip. # this seems",
"ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int) return (nrows, ncols) def iter(self,image=None): '''Next window generator Parameters ----------",
"segmented self.image = image ''' Parameters ---------- segmented : array_like Superpixel labeled segmentation",
"# bottom-left corner np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...])) elif self.mode == 'discard': mask = np.ones_like(chip)",
"for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)): ... tot += ret[0].sum() ... #print(i, ':\\n', ret[0]) >>>",
"skimage.measure #import matplotlib.pyplot as plt #import ipdb # could maybe turn this into",
"...] = np.flipud(np.atleast_2d( # top border chip[:min_y, :, ...])) chunk[min_y:max_y, :min_x, ...] =",
"into pad_image(). then mode wouldn't # be necessary here and I could simply",
"min_x) min_x = max(0, -min_x) min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y",
"easily center the window' self.pixels_per_cell = tuple(pixels_per_cell) self.pixel_stride = self.pixels_per_cell if pixel_stride is",
"# for additional speedup BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") pixels_per_half_cell =",
"...] = np.flipud(np.atleast_2d( # top-right corner np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...])) chunk[max_y:, max_x:, ...] =",
"---------- pixels_per_cell : array_like x,y - let x,y be odd so the window",
"# TODO remove small, unconnected components properties = skimage.measure.regionprops(self.segmented) BoundingBox = namedtuple(\"BoundingBox\", \"min_x",
"np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...])) elif self.mode == 'discard': mask = np.ones_like(chip) chunk = chip",
"np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int) return (nrows, ncols) def iter(self,image=None): '''Next window generator Parameters ---------- image :",
"tot = 0; im = np.arange(81).reshape((9,9)).T >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)): ... tot",
"to be sequential and start at 1: {1,2,...}. label 0 is treated as",
"stop_x-self.start_pt.x #print(roi_width, roi_height, self.pixel_stride) nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int) ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int) return (nrows, ncols)",
"= np.arange(100).reshape((10,10)) >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)): ... tot += ret[0].sum() ... #print(i,",
"BoundingBox(min_x,max_x,min_y,max_y) min_x, max_x = max(0, bbox.min_x), min(ncols, bbox.max_x) min_y, max_y = max(0, bbox.min_y),",
"# bottom border np.flipud(chip)[:nrows_chunk-max_y, :, ...]) chunk[min_y:max_y, max_x:, ...] = np.atleast_2d( # right",
"can be filled from chip. # this seems harder than it should be...",
"chip. # this seems harder than it should be... chunk[:min_y, min_x:max_x, ...] =",
"return self def iter(self, image=None): '''Next superpixel generator Parameters ---------- image : array_like,",
": str, optional Points outside the boundaries of the input are filled according",
"or 3) mode : str, optional Points outside the boundaries of the input",
"ret[0]) >>> print(tot) # weak test 25000 ''' assert pixels_per_cell[0] % 2 ==",
"np.empty( self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim == 3 else ()), dtype=self.image.dtype.type) chunk[:] =",
"''' if image is not None: self.image = image elif self.image is None:",
"min_y+self.pixels_per_cell[1] bbox = BoundingBox(min_x,max_x,min_y,max_y) min_x, max_x = max(0, bbox.min_x), min(ncols, bbox.max_x) min_y, max_y",
"label zero (0) as unlabeled and ignores it # TODO remove small, unconnected",
"chunk = np.empty( self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim == 3 else ()), dtype=self.image.dtype.type)",
"= min(self.pixels_per_cell[1], nrows - min_y) min_y = max(0, -min_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x,",
"optional Value used for points outside the boundaries of the input if ``mode='constant'``.",
"chunk[min_y:max_y, min_x:max_x, ...] = chip mask[min_y:max_y, min_x:max_x] = 1 if self.mode == 'reflect':",
"= chunk.shape[0:2] # NOTE assume the points outside the boundaries of input can",
"max_y\") for rp in properties: if rp._slice is None: continue (min_y,min_x,max_y,max_x) = rp.bbox",
"mode. Only ``mode='constant'``, ``mode='discard'`` and ``mode='reflect'`` are currently supported, although others could be",
"array_like, optional like numpy.array (ndim == 2 or 3) Returns ------- numpy.array, optional",
"class IterateOverSuperpixels(object): def __init__(self, segmented, image=None): self.segmented = segmented self.image = image '''",
"maybe turn this into a generic mutable namedtuple class Point2D(object): __slots__ = \"x\",",
"this is more efficient though if self.mode == 'constant' or self.mode == 'reflect':",
"raise TypeError(\"self.image cannot be of type NoneType\") # regionprops() treats label zero (0)",
"image : array_like, optional like numpy.array (ndim == 2 or 3) ''' def",
"chip out pixels in this sliding window min_x = self.start_pt.x + self.pixel_stride[0]*c -",
"= image[min_y:max_y, min_x:max_x,...] mask = rp.filled_image bbox = BoundingBox(min_x,max_x-1,min_y,max_y-1) yield (chip, mask, bbox)",
"around the boarder of the image for r in xrange(ystrides_per_image): for c in",
"like numpy.array (ndim == 2 or 3) Returns ------- numpy.array, optional chip :",
"- pixels_per_half_cell[0] max_x = min(self.pixels_per_cell[0], ncols - min_x) min_x = max(0, -min_x) min_y",
"min_x, max_x = max(0, bbox.min_x), min(ncols, bbox.max_x) min_y, max_y = max(0, bbox.min_y), min(nrows,",
"yield getattr(self, field_name) def __getitem__(self, index): '''tuple/list style getitem''' return getattr(self, self.__slots__[index]) #",
"of type NoneType\") nrows, ncols = self.image.shape[0:2] stop_x = ncols if self.stop_pt.x is",
"window generator Parameters ---------- image : array_like like numpy.array (ndim == 2 or",
"min(ncols, bbox.max_x) min_y, max_y = max(0, bbox.min_y), min(nrows, bbox.max_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x,",
"= self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min(self.pixels_per_cell[1], nrows - min_y) min_y",
"def __init__(self, segmented, image=None): self.segmented = segmented self.image = image ''' Parameters ----------",
"properties: if rp._slice is None: continue (min_y,min_x,max_y,max_x) = rp.bbox chip = image[min_y:max_y, min_x:max_x,...]",
"according to the given mode. numpy.array mask : the binary mask of the",
": array_like, optional like numpy.array (ndim == 2 or 3) ''' def setImage(self,",
"pixels_per_cell/2, it is possible to leave data unseen on the # right/bottom boarder",
"stop_x = ncols if self.stop_pt.x is None else int(self.stop_pt.x) stop_y = nrows if",
"as np from collections import namedtuple import skimage.measure #import matplotlib.pyplot as plt #import",
"let x,y be odd so the window can be easily centered pixel_stride :",
"# this seems harder than it should be... chunk[:min_y, min_x:max_x, ...] = np.flipud(np.atleast_2d(",
"3) ''' self.image = image return self def iter(self, image=None): '''Next superpixel generator",
"== 2 or 3) Returns ------- numpy.array, optional chip : pixels within the",
"# this is similar to matlab's im2col class IterateOverWindows(object): def __init__(self, pixels_per_cell, pixel_stride=None,",
"array_like Superpixel labeled segmentation (like numpy.array) NOTE regionprops expects labels to be sequential",
"the image) MODIFICATIONS sgr : optimized sgr : turned into a class '''",
"ncols) def iter(self,image=None): '''Next window generator Parameters ---------- image : array_like like numpy.array",
"centered pixel_stride : array_like, optional x,y image : array_like, optional like numpy.array (ndim",
"self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min(self.pixels_per_cell[0], ncols - min_x) min_x = max(0, -min_x)",
"'''iterate over fields tuple/list style''' for field_name in self.__slots__: yield getattr(self, field_name) def",
"or 3) Returns ------- numpy.array, optional chip : pixels within the current window.",
"bbox.max_x) min_y, max_y = max(0, bbox.min_y), min(nrows, bbox.max_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y,",
"...])) elif self.mode == 'discard': mask = np.ones_like(chip) chunk = chip else: assert",
"TypeError(\"self.image cannot be of type NoneType\") nrows, ncols = self.image.shape[0:2] # NOTE could",
"in this sliding window min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x =",
"tot += ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot) # weak test 25000",
"...]) chunk[:min_y, :min_x, ...] = np.fliplr(np.flipud(np.atleast_2d( # top-left corner chip[:min_y, :min_x, ...]))) chunk[:min_y,",
"':\\n', ret[0]) >>> print(tot) # weak test 25000 ''' assert pixels_per_cell[0] % 2",
"min(self.pixels_per_cell[1], nrows - min_y) min_y = max(0, -min_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y,",
"the boarder of the image for r in xrange(ystrides_per_image): for c in xrange(xstrides_per_image):",
"odd number for pixels_per_cell to easily center the window' self.pixels_per_cell = tuple(pixels_per_cell) self.pixel_stride",
"None: raise TypeError(\"self.image cannot be of type NoneType\") nrows, ncols = self.image.shape[0:2] #",
"chunk.shape[0:2] # NOTE assume the points outside the boundaries of input can be",
"pad_image(). then mode wouldn't # be necessary here and I could simply loop",
"chunk[:min_y, :min_x, ...] = np.fliplr(np.flipud(np.atleast_2d( # top-left corner chip[:min_y, :min_x, ...]))) chunk[:min_y, max_x:,",
"\"min_x max_x min_y max_y\") pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2 ystrides_per_image, xstrides_per_image = self.shape() #",
"Parameters ---------- segmented : array_like Superpixel labeled segmentation (like numpy.array) NOTE regionprops expects",
"chip mask[min_y:max_y, min_x:max_x] = 1 if self.mode == 'reflect': nrows_chunk, ncols_chunk = chunk.shape[0:2]",
"- pixels_per_half_cell[1] max_y = min(self.pixels_per_cell[1], nrows - min_y) min_y = max(0, -min_y) #print('c=%d'%c,",
"#print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) #print() chunk[min_y:max_y, min_x:max_x, ...] = chip mask[min_y:max_y,",
"TypeError(\"self.image cannot be of type NoneType\") nrows, ncols = self.image.shape[0:2] stop_x = ncols",
"MODIFICATIONS sgr : turned into a class sgr : added mode='reflect' ''' if",
"...] = chip mask[min_y:max_y, min_x:max_x] = 1 if self.mode == 'reflect': nrows_chunk, ncols_chunk",
"of the input are filled according to the given mode. numpy.array mask :",
"print(tot) # weak test 25000 ''' assert pixels_per_cell[0] % 2 == 1 and",
"cval : float, optional Value used for points outside the boundaries of the",
"FIXME should bbox be max-1 like in the superpixel version yield chunk, mask,",
"index): '''tuple/list style getitem''' return getattr(self, self.__slots__[index]) # NOTE IterateOverWindows and IterateOverSuperpixels must",
"boarder into pad_image(). then mode wouldn't # be necessary here and I could",
"points outside the boundaries of input can be filled from chip. # this",
"NOTE could iterate over the interior of the image without bounds checking #",
"bbox.min_y), min(nrows, bbox.max_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) chip = self.image[min_y:max_y, min_x:max_x,",
"self.mode == 'constant' or self.mode == 'reflect': chunk = np.empty( self.pixels_per_cell + ((self.image.shape[2],)",
"self.image.ndim == 3 else ()), dtype=self.image.dtype.type) chunk[:] = self.cval mask = np.zeros(self.pixels_per_cell) min_x",
": array_like, optional like numpy.array (ndim == 2 or 3) Returns ------- numpy.array,",
"False, 'unrecognized mode' # FIXME should bbox be max-1 like in the superpixel",
"start_pt=(0, 0), stop_pt=(None, None)): ''' Sliding window iterator. Parameters ---------- pixels_per_cell : array_like",
"= Point2D(*(stop_pt)) def setImage(self, image): ''' Parameters ---------- image : array_like like numpy.array",
"an odd number for pixels_per_cell to easily center the window' self.pixels_per_cell = tuple(pixels_per_cell)",
"'nearest' and 'wrap') cval : float, optional Value used for points outside the",
"= np.empty( self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim == 3 else ()), dtype=self.image.dtype.type) chunk[:]",
"= max(0, bbox.min_y), min(nrows, bbox.max_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) chip =",
"for additional speedup BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") pixels_per_half_cell = self.pixels_per_cell[0]//2,",
"'''Next window generator Parameters ---------- image : array_like like numpy.array (ndim == 2",
"for c in xrange(xstrides_per_image): # chip out pixels in this sliding window min_x",
"chunk[max_y:, max_x:, ...] = np.atleast_2d( # bottom-right corner np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...]) chunk[max_y:, :min_x,",
"in start_pt)) self.stop_pt = Point2D(*(stop_pt)) def setImage(self, image): ''' Parameters ---------- image :",
"BoundingBox bbox : the inclusive extents of the chip (which may exceed the",
"this sliding window min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min_x+self.pixels_per_cell[0]",
"= BoundingBox(min_x,max_x,min_y,max_y) min_x, max_x = max(0, bbox.min_x), min(ncols, bbox.max_x) min_y, max_y = max(0,",
"-min_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) #print() chunk[min_y:max_y, min_x:max_x, ...] = chip",
"self.stop_pt.x is None else int(self.stop_pt.x) stop_y = nrows if self.stop_pt.y is None else",
"MODIFICATIONS sgr : optimized sgr : turned into a class ''' if image",
"bbox be max-1 like in the superpixel version yield chunk, mask, bbox class",
"None: raise TypeError(\"self.image cannot be of type NoneType\") # regionprops() treats label zero",
"similar to matlab's im2col class IterateOverWindows(object): def __init__(self, pixels_per_cell, pixel_stride=None, image=None, mode='constant', cval=0,",
"the input are filled according to the given mode. numpy.array mask : the",
"''' self.image = image return self def shape(self): if self.image is None: raise",
"sgr : turned into a class sgr : added mode='reflect' ''' if image",
"or 3) ''' self.image = image return self def iter(self, image=None): '''Next superpixel",
"= image return self def shape(self): if self.image is None: raise TypeError(\"self.image cannot",
"the bounds of the image) MODIFICATIONS sgr : turned into a class sgr",
"(like numpy.array) NOTE regionprops expects labels to be sequential and start at 1:",
"nrows, ncols = self.image.shape[0:2] # NOTE could iterate over the interior of the",
"2 == 1 and pixels_per_cell[1] % 2 == 1, \\ 'provide an odd",
"= np.ones_like(chip) chunk = chip else: assert False, 'unrecognized mode' # FIXME should",
"int(self.stop_pt.y) roi_height = stop_y-self.start_pt.y roi_width = stop_x-self.start_pt.x #print(roi_width, roi_height, self.pixel_stride) nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int)",
"= y def __iter__(self): '''iterate over fields tuple/list style''' for field_name in self.__slots__:",
"# weak test 22647 >>> tot = 0; im = np.arange(81).reshape((9,9)).T >>> for",
"c in xrange(xstrides_per_image): # chip out pixels in this sliding window min_x =",
"= image self.mode = mode self.cval = cval self.start_pt = Point2D(*(int(s) for s",
"= self.shape() # iterate around the boarder of the image for r in",
"trikery (flipping first simplifies indexing) chunk[max_y:, min_x:max_x, ...] = np.atleast_2d( # bottom border",
"pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2 ystrides_per_image, xstrides_per_image = self.shape() # iterate around the boarder",
"max(0, -min_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) #print() chunk[min_y:max_y, min_x:max_x, ...] =",
"np.fliplr(chip)[:, :ncols_chunk-max_x, ...]) chunk[:min_y, :min_x, ...] = np.fliplr(np.flipud(np.atleast_2d( # top-left corner chip[:min_y, :min_x,",
"self.shape() # iterate around the boarder of the image for r in xrange(ystrides_per_image):",
"image : array_like like numpy.array (ndim == 2 or 3) ''' self.image =",
"class IterateOverWindows(object): def __init__(self, pixels_per_cell, pixel_stride=None, image=None, mode='constant', cval=0, start_pt=(0, 0), stop_pt=(None, None)):",
"xrange(ystrides_per_image): for c in xrange(xstrides_per_image): # chip out pixels in this sliding window",
"== 'constant' or self.mode == 'reflect': chunk = np.empty( self.pixels_per_cell + ((self.image.shape[2],) if",
"of the input if ``mode='constant'``. Default is 0.0 start_pt : array_like, optional (x,y)",
"I could simply loop over the image. # RE this is more efficient",
"self.image = image return self def shape(self): if self.image is None: raise TypeError(\"self.image",
"indice trikery (flipping first simplifies indexing) chunk[max_y:, min_x:max_x, ...] = np.atleast_2d( # bottom",
"min_y, max_y) #print() chunk[min_y:max_y, min_x:max_x, ...] = chip mask[min_y:max_y, min_x:max_x] = 1 if",
"= \"x\", \"y\" def __init__(self, x, y): self.x = x self.y = y",
"(0) as unlabeled and ignores it # TODO remove small, unconnected components properties",
"according to the given mode. Only ``mode='constant'``, ``mode='discard'`` and ``mode='reflect'`` are currently supported,",
"IterateOverWindows(object): def __init__(self, pixels_per_cell, pixel_stride=None, image=None, mode='constant', cval=0, start_pt=(0, 0), stop_pt=(None, None)): '''",
"from chip. # this seems harder than it should be... chunk[:min_y, min_x:max_x, ...]",
"superpixel generator Parameters ---------- image : array_like, optional like numpy.array (ndim == 2",
"if rp._slice is None: continue (min_y,min_x,max_y,max_x) = rp.bbox chip = image[min_y:max_y, min_x:max_x,...] mask",
"image=None, mode='constant', cval=0, start_pt=(0, 0), stop_pt=(None, None)): ''' Sliding window iterator. Parameters ----------",
"bbox = BoundingBox(min_x,max_x,min_y,max_y) min_x, max_x = max(0, bbox.min_x), min(ncols, bbox.max_x) min_y, max_y =",
"+ self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min(self.pixels_per_cell[1], nrows - min_y) min_y = max(0,",
"corner chip[:min_y, :min_x, ...]))) chunk[:min_y, max_x:, ...] = np.flipud(np.atleast_2d( # top-right corner np.fliplr(chip)[:min_y,",
"boarder of an image # # this is similar to matlab's im2col class",
"# # this is similar to matlab's im2col class IterateOverWindows(object): def __init__(self, pixels_per_cell,",
"image : array_like, optional like numpy.array (ndim == 2 or 3) mode :",
"like numpy.array (ndim == 2 or 3) ''' self.image = image return self",
"handling the boarder into pad_image(). then mode wouldn't # be necessary here and",
"= min_y+self.pixels_per_cell[1] bbox = BoundingBox(min_x,max_x,min_y,max_y) min_x, max_x = max(0, bbox.min_x), min(ncols, bbox.max_x) min_y,",
"# NOTE IterateOverWindows and IterateOverSuperpixels must share the same iter() interface # TODO",
"simply loop over the image. # RE this is more efficient though if",
"None)): ''' Sliding window iterator. Parameters ---------- pixels_per_cell : array_like x,y - let",
"into a class ''' if image is not None: self.image = image elif",
"1 if self.mode == 'reflect': nrows_chunk, ncols_chunk = chunk.shape[0:2] # NOTE assume the",
"chip[:min_y, :min_x, ...]))) chunk[:min_y, max_x:, ...] = np.flipud(np.atleast_2d( # top-right corner np.fliplr(chip)[:min_y, :ncols_chunk-max_x,",
"mutable namedtuple class Point2D(object): __slots__ = \"x\", \"y\" def __init__(self, x, y): self.x",
"unlabeled and ignores it # TODO remove small, unconnected components properties = skimage.measure.regionprops(self.segmented)",
"raise TypeError(\"self.image cannot be of type NoneType\") nrows, ncols = self.image.shape[0:2] # NOTE",
"# chip out pixels in this sliding window min_x = self.start_pt.x + self.pixel_stride[0]*c",
"= namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2 ystrides_per_image, xstrides_per_image =",
"numpy.array (ndim == 2 or 3) ''' def setImage(self, image): ''' Parameters ----------",
"(ndim == 2 or 3) ''' self.image = image return self def shape(self):",
"sliding window min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min_x+self.pixels_per_cell[0] min_y",
"self.image = image return self def iter(self, image=None): '''Next superpixel generator Parameters ----------",
"---------- image : array_like like numpy.array (ndim == 2 or 3) ''' self.image",
"chip[:min_y, :, ...])) chunk[min_y:max_y, :min_x, ...] = np.fliplr(np.atleast_2d( # left border chip[:, :min_x,",
"0), stop_pt=(None, None)): ''' Sliding window iterator. Parameters ---------- pixels_per_cell : array_like x,y",
"a fixed-size window # REVIEW I could refactor handling the boarder into pad_image().",
"22647 >>> tot = 0; im = np.arange(81).reshape((9,9)).T >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)):",
"it is possible to leave data unseen on the # right/bottom boarder of",
"the input if ``mode='constant'``. Default is 0.0 start_pt : array_like, optional (x,y) stop_pt",
"self.mode = mode self.cval = cval self.start_pt = Point2D(*(int(s) for s in start_pt))",
"np.arange(100).reshape((10,10)) >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)): ... tot += ret[0].sum() ... #print(i, ':\\n',",
"== 'reflect': chunk = np.empty( self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim == 3 else",
"% 2 == 1, \\ 'provide an odd number for pixels_per_cell to easily",
"#print(i, ':\\n', ret[0]) >>> print(tot) # weak test 22647 >>> tot = 0;",
"max_x, min_y, max_y) #print() chunk[min_y:max_y, min_x:max_x, ...] = chip mask[min_y:max_y, min_x:max_x] = 1",
":min_x, ...] = np.fliplr(np.atleast_2d( # left border chip[:, :min_x, ...])) # NOTE neg",
"stop_y-self.start_pt.y roi_width = stop_x-self.start_pt.x #print(roi_width, roi_height, self.pixel_stride) nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int) ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int)",
"= nrows if self.stop_pt.y is None else int(self.stop_pt.y) roi_height = stop_y-self.start_pt.y roi_width =",
"# be necessary here and I could simply loop over the image. #",
"unlabeled. image : array_like, optional like numpy.array (ndim == 2 or 3) '''",
"if self.image.ndim == 3 else ()), dtype=self.image.dtype.type) chunk[:] = self.cval mask = np.zeros(self.pixels_per_cell)",
"= min(self.pixels_per_cell[0], ncols - min_x) min_x = max(0, -min_x) min_y = self.start_pt.y +",
"the bounds of the image) MODIFICATIONS sgr : optimized sgr : turned into",
"np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...]) chunk[max_y:, :min_x, ...] = np.fliplr(np.atleast_2d( # bottom-left corner np.flipud(chip)[:nrows_chunk-max_y, :min_x,",
"class sgr : added mode='reflect' ''' if image is not None: self.image =",
"the given mode. numpy.array mask : the binary mask of the window within",
"min_x:max_x, ...] = chip mask[min_y:max_y, min_x:max_x] = 1 if self.mode == 'reflect': nrows_chunk,",
"# top-left corner chip[:min_y, :min_x, ...]))) chunk[:min_y, max_x:, ...] = np.flipud(np.atleast_2d( # top-right",
"= np.atleast_2d( # right border np.fliplr(chip)[:, :ncols_chunk-max_x, ...]) chunk[:min_y, :min_x, ...] = np.fliplr(np.flipud(np.atleast_2d(",
"speedup BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2 ystrides_per_image,",
"if self.stop_pt.y is None else int(self.stop_pt.y) roi_height = stop_y-self.start_pt.y roi_width = stop_x-self.start_pt.x #print(roi_width,",
"None: continue (min_y,min_x,max_y,max_x) = rp.bbox chip = image[min_y:max_y, min_x:max_x,...] mask = rp.filled_image bbox",
"cval self.start_pt = Point2D(*(int(s) for s in start_pt)) self.stop_pt = Point2D(*(stop_pt)) def setImage(self,",
"image : array_like, optional like numpy.array (ndim == 2 or 3) Returns -------",
"added mode='reflect' ''' if image is not None: self.image = image elif self.image",
"interior of the image without bounds checking # for additional speedup BoundingBox =",
"cannot be of type NoneType\") nrows, ncols = self.image.shape[0:2] # NOTE could iterate",
"max_y\") pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2 ystrides_per_image, xstrides_per_image = self.shape() # iterate around the",
"is possible to leave data unseen on the # right/bottom boarder of an",
"Value used for points outside the boundaries of the input if ``mode='constant'``. Default",
"if self.mode == 'reflect': nrows_chunk, ncols_chunk = chunk.shape[0:2] # NOTE assume the points",
"3) mode : str, optional Points outside the boundaries of the input are",
"odd so the window can be easily centered pixel_stride : array_like, optional x,y",
"within the chip BoundingBox bbox : the inclusive extents of the chip (which",
"NoneType\") # regionprops() treats label zero (0) as unlabeled and ignores it #",
"window min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min_x+self.pixels_per_cell[0] min_y =",
"min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min(self.pixels_per_cell[0], ncols - min_x)",
"{1,2,...}. label 0 is treated as unlabeled. image : array_like, optional like numpy.array",
"turn this into a generic mutable namedtuple class Point2D(object): __slots__ = \"x\", \"y\"",
"= self.pixels_per_cell if pixel_stride is None else pixel_stride self.image = image self.mode =",
"max_x min_y max_y\") for rp in properties: if rp._slice is None: continue (min_y,min_x,max_y,max_x)",
"if ``mode='constant'``. Default is 0.0 start_pt : array_like, optional (x,y) stop_pt : array_like,",
"or 3) ''' self.image = image return self def shape(self): if self.image is",
"so the window can be easily centered pixel_stride : array_like, optional x,y image",
"although others could be added (e.g., 'nearest' and 'wrap') cval : float, optional",
"else int(self.stop_pt.y) roi_height = stop_y-self.start_pt.y roi_width = stop_x-self.start_pt.x #print(roi_width, roi_height, self.pixel_stride) nrows =",
"TypeError(\"self.image cannot be of type NoneType\") # regionprops() treats label zero (0) as",
"could be added (e.g., 'nearest' and 'wrap') cval : float, optional Value used",
":ncols_chunk-max_x, ...]) chunk[max_y:, :min_x, ...] = np.fliplr(np.atleast_2d( # bottom-left corner np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...]))",
"Default is 0.0 start_pt : array_like, optional (x,y) stop_pt : array_like, optional (x,y)",
"style getitem''' return getattr(self, self.__slots__[index]) # NOTE IterateOverWindows and IterateOverSuperpixels must share the",
"window can be easily centered pixel_stride : array_like, optional x,y image : array_like,",
"enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)): ... tot += ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot) # weak",
"bbox.max_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) chip = self.image[min_y:max_y, min_x:max_x, ...] #",
"-min_x) min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min(self.pixels_per_cell[1], nrows -",
"print(tot) # weak test 22647 >>> tot = 0; im = np.arange(81).reshape((9,9)).T >>>",
"if self.image is None: raise TypeError(\"self.image cannot be of type NoneType\") nrows, ncols",
": the binary mask of the window within the chip BoundingBox bbox :",
"0; im = np.arange(100).reshape((10,10)) >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)): ... tot += ret[0].sum()",
"max_x = min_x+self.pixels_per_cell[0] min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min_y+self.pixels_per_cell[1]",
"'r=%d'%r, min_x, max_x, min_y, max_y) #print() chunk[min_y:max_y, min_x:max_x, ...] = chip mask[min_y:max_y, min_x:max_x]",
"same iter() interface # TODO create IterateOverOverlappingWindows(IterateOverWindows), which enforces # pixel_stride <= pixels_per_cell",
"as unlabeled and ignores it # TODO remove small, unconnected components properties =",
"pixels_per_cell[0] % 2 == 1 and pixels_per_cell[1] % 2 == 1, \\ 'provide",
"raise TypeError(\"self.image cannot be of type NoneType\") nrows, ncols = self.image.shape[0:2] stop_x =",
"= np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int) ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int) return (nrows, ncols) def iter(self,image=None): '''Next window generator",
"min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min_y+self.pixels_per_cell[1] bbox = BoundingBox(min_x,max_x,min_y,max_y)",
"into a generic mutable namedtuple class Point2D(object): __slots__ = \"x\", \"y\" def __init__(self,",
"optional x,y image : array_like, optional like numpy.array (ndim == 2 or 3)",
"from collections import namedtuple import skimage.measure #import matplotlib.pyplot as plt #import ipdb #",
"of type NoneType\") nrows, ncols = self.image.shape[0:2] # NOTE could iterate over the",
"pixels_per_half_cell[1] max_y = min_y+self.pixels_per_cell[1] bbox = BoundingBox(min_x,max_x,min_y,max_y) min_x, max_x = max(0, bbox.min_x), min(ncols,",
"turned into a class sgr : added mode='reflect' ''' if image is not",
":min_x, ...])) # NOTE neg indice trikery (flipping first simplifies indexing) chunk[max_y:, min_x:max_x,",
"numpy.array) NOTE regionprops expects labels to be sequential and start at 1: {1,2,...}.",
"None else int(self.stop_pt.y) roi_height = stop_y-self.start_pt.y roi_width = stop_x-self.start_pt.x #print(roi_width, roi_height, self.pixel_stride) nrows",
"self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min(self.pixels_per_cell[1], nrows - min_y) min_y =",
"the points outside the boundaries of input can be filled from chip. #",
"image elif self.image is None: raise TypeError(\"self.image cannot be of type NoneType\") #",
"max_x = max(0, bbox.min_x), min(ncols, bbox.max_x) min_y, max_y = max(0, bbox.min_y), min(nrows, bbox.max_y)",
"self.pixel_stride = self.pixels_per_cell if pixel_stride is None else pixel_stride self.image = image self.mode",
"mask, bbox class IterateOverSuperpixels(object): def __init__(self, segmented, image=None): self.segmented = segmented self.image =",
"for pixels_per_cell to easily center the window' self.pixels_per_cell = tuple(pixels_per_cell) self.pixel_stride = self.pixels_per_cell",
"I could refactor handling the boarder into pad_image(). then mode wouldn't # be",
"pixels_per_half_cell[1] max_y = min(self.pixels_per_cell[1], nrows - min_y) min_y = max(0, -min_y) #print('c=%d'%c, 'r=%d'%r,",
"for r in xrange(ystrides_per_image): for c in xrange(xstrides_per_image): # chip out pixels in",
"or 3) ''' def setImage(self, image): ''' Parameters ---------- image : array_like like",
": array_like like numpy.array (ndim == 2 or 3) Returns ------- numpy.array, optional",
"def shape(self): if self.image is None: raise TypeError(\"self.image cannot be of type NoneType\")",
"(x,y) stop_pt : array_like, optional (x,y) >>> tot = 0; im = np.arange(100).reshape((10,10))",
"array_like like numpy.array (ndim == 2 or 3) Returns ------- numpy.array, optional chip",
"in xrange(ystrides_per_image): for c in xrange(xstrides_per_image): # chip out pixels in this sliding",
"pixels_per_cell # # NOTE if pixel_stride > pixels_per_cell/2, it is possible to leave",
": float, optional Value used for points outside the boundaries of the input",
"is None else int(self.stop_pt.x) stop_y = nrows if self.stop_pt.y is None else int(self.stop_pt.y)",
"TODO remove small, unconnected components properties = skimage.measure.regionprops(self.segmented) BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x",
"if pixel_stride is None else pixel_stride self.image = image self.mode = mode self.cval",
"mode self.cval = cval self.start_pt = Point2D(*(int(s) for s in start_pt)) self.stop_pt =",
"of input can be filled from chip. # this seems harder than it",
"chip = image[min_y:max_y, min_x:max_x,...] mask = rp.filled_image bbox = BoundingBox(min_x,max_x-1,min_y,max_y-1) yield (chip, mask,",
"__slots__ = \"x\", \"y\" def __init__(self, x, y): self.x = x self.y =",
"for s in start_pt)) self.stop_pt = Point2D(*(stop_pt)) def setImage(self, image): ''' Parameters ----------",
"the given mode. Only ``mode='constant'``, ``mode='discard'`` and ``mode='reflect'`` are currently supported, although others",
"the current window. Points outside the boundaries of the input are filled according",
"Superpixel labeled segmentation (like numpy.array) NOTE regionprops expects labels to be sequential and",
"= self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2 ystrides_per_image, xstrides_per_image = self.shape() # iterate around the boarder of",
"checking # for additional speedup BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") pixels_per_half_cell",
"should bbox be max-1 like in the superpixel version yield chunk, mask, bbox",
"#print() chunk[min_y:max_y, min_x:max_x, ...] = chip mask[min_y:max_y, min_x:max_x] = 1 if self.mode ==",
"sgr : added mode='reflect' ''' if image is not None: self.image = image",
"of the image) MODIFICATIONS sgr : turned into a class sgr : added",
"REVIEW I could refactor handling the boarder into pad_image(). then mode wouldn't #",
"def __getitem__(self, index): '''tuple/list style getitem''' return getattr(self, self.__slots__[index]) # NOTE IterateOverWindows and",
"labeled segmentation (like numpy.array) NOTE regionprops expects labels to be sequential and start",
"def iter(self,image=None): '''Next window generator Parameters ---------- image : array_like like numpy.array (ndim",
"zero (0) as unlabeled and ignores it # TODO remove small, unconnected components",
"== 2 or 3) ''' def setImage(self, image): ''' Parameters ---------- image :",
"= x self.y = y def __iter__(self): '''iterate over fields tuple/list style''' for",
"else ()), dtype=self.image.dtype.type) chunk[:] = self.cval mask = np.zeros(self.pixels_per_cell) min_x = self.start_pt.x +",
"for rp in properties: if rp._slice is None: continue (min_y,min_x,max_y,max_x) = rp.bbox chip",
": pixels within the current window. Points outside the boundaries of the input",
"into a class sgr : added mode='reflect' ''' if image is not None:",
"weak test 22647 >>> tot = 0; im = np.arange(81).reshape((9,9)).T >>> for i,ret",
"nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int) ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int) return (nrows, ncols) def iter(self,image=None): '''Next window",
"the binary mask of the window within the chip BoundingBox bbox : the",
"'reflect': chunk = np.empty( self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim == 3 else ()),",
"ncols_chunk = chunk.shape[0:2] # NOTE assume the points outside the boundaries of input",
"------- numpy.array, optional chip : pixels within the current window. Points outside the",
"= image ''' Parameters ---------- segmented : array_like Superpixel labeled segmentation (like numpy.array)",
"must share the same iter() interface # TODO create IterateOverOverlappingWindows(IterateOverWindows), which enforces #",
"bbox : the inclusive extents of the chip (which may exceed the bounds",
"\"x\", \"y\" def __init__(self, x, y): self.x = x self.y = y def",
"im2col class IterateOverWindows(object): def __init__(self, pixels_per_cell, pixel_stride=None, image=None, mode='constant', cval=0, start_pt=(0, 0), stop_pt=(None,",
"# top border chip[:min_y, :, ...])) chunk[min_y:max_y, :min_x, ...] = np.fliplr(np.atleast_2d( # left",
"ncols = self.image.shape[0:2] stop_x = ncols if self.stop_pt.x is None else int(self.stop_pt.x) stop_y",
"start_pt : array_like, optional (x,y) stop_pt : array_like, optional (x,y) >>> tot =",
"(ndim == 2 or 3) mode : str, optional Points outside the boundaries",
"max_y) chip = self.image[min_y:max_y, min_x:max_x, ...] # couch chip in a fixed-size window",
"self.image[min_y:max_y, min_x:max_x, ...] # couch chip in a fixed-size window # REVIEW I",
"is None else int(self.stop_pt.y) roi_height = stop_y-self.start_pt.y roi_width = stop_x-self.start_pt.x #print(roi_width, roi_height, self.pixel_stride)",
"bounds of the image) MODIFICATIONS sgr : turned into a class sgr :",
"chunk[:min_y, max_x:, ...] = np.flipud(np.atleast_2d( # top-right corner np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...])) chunk[max_y:, max_x:,",
"max-1 like in the superpixel version yield chunk, mask, bbox class IterateOverSuperpixels(object): def",
"border chip[:min_y, :, ...])) chunk[min_y:max_y, :min_x, ...] = np.fliplr(np.atleast_2d( # left border chip[:,",
"is similar to matlab's im2col class IterateOverWindows(object): def __init__(self, pixels_per_cell, pixel_stride=None, image=None, mode='constant',",
"max(0, -min_x) min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min(self.pixels_per_cell[1], nrows",
"border np.fliplr(chip)[:, :ncols_chunk-max_x, ...]) chunk[:min_y, :min_x, ...] = np.fliplr(np.flipud(np.atleast_2d( # top-left corner chip[:min_y,",
"(ndim == 2 or 3) ''' self.image = image return self def iter(self,",
"= skimage.measure.regionprops(self.segmented) BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") for rp in properties:",
"mode' # FIXME should bbox be max-1 like in the superpixel version yield",
"possible to leave data unseen on the # right/bottom boarder of an image",
"self.__slots__: yield getattr(self, field_name) def __getitem__(self, index): '''tuple/list style getitem''' return getattr(self, self.__slots__[index])",
"self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min_y+self.pixels_per_cell[1] bbox = BoundingBox(min_x,max_x,min_y,max_y) min_x, max_x = max(0,",
"top border chip[:min_y, :, ...])) chunk[min_y:max_y, :min_x, ...] = np.fliplr(np.atleast_2d( # left border",
"self.x = x self.y = y def __iter__(self): '''iterate over fields tuple/list style'''",
"the window can be easily centered pixel_stride : array_like, optional x,y image :",
"class ''' if image is not None: self.image = image elif self.image is",
"self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim == 3 else ()), dtype=self.image.dtype.type) chunk[:] = self.cval",
"min_x:max_x, ...] # couch chip in a fixed-size window # REVIEW I could",
"min_y, max_y) chip = self.image[min_y:max_y, min_x:max_x, ...] # couch chip in a fixed-size",
"= self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min_x+self.pixels_per_cell[0] min_y = self.start_pt.y +",
"type NoneType\") nrows, ncols = self.image.shape[0:2] stop_x = ncols if self.stop_pt.x is None",
"NOTE assume the points outside the boundaries of input can be filled from",
"in the superpixel version yield chunk, mask, bbox class IterateOverSuperpixels(object): def __init__(self, segmented,",
"to the given mode. Only ``mode='constant'``, ``mode='discard'`` and ``mode='reflect'`` are currently supported, although",
"min_x = max(0, -min_x) min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y =",
"weak test 25000 ''' assert pixels_per_cell[0] % 2 == 1 and pixels_per_cell[1] %",
"xrange(xstrides_per_image): # chip out pixels in this sliding window min_x = self.start_pt.x +",
"and start at 1: {1,2,...}. label 0 is treated as unlabeled. image :",
"import skimage.measure #import matplotlib.pyplot as plt #import ipdb # could maybe turn this",
":min_x, ...] = np.fliplr(np.flipud(np.atleast_2d( # top-left corner chip[:min_y, :min_x, ...]))) chunk[:min_y, max_x:, ...]",
"= mode self.cval = cval self.start_pt = Point2D(*(int(s) for s in start_pt)) self.stop_pt",
"= np.zeros(self.pixels_per_cell) min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min(self.pixels_per_cell[0], ncols",
"chunk[max_y:, :min_x, ...] = np.fliplr(np.atleast_2d( # bottom-left corner np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...])) elif self.mode",
"# TODO create IterateOverOverlappingWindows(IterateOverWindows), which enforces # pixel_stride <= pixels_per_cell # # NOTE",
"def __init__(self, x, y): self.x = x self.y = y def __iter__(self): '''iterate",
"supported, although others could be added (e.g., 'nearest' and 'wrap') cval : float,",
"self.y = y def __iter__(self): '''iterate over fields tuple/list style''' for field_name in",
"# weak test 25000 ''' assert pixels_per_cell[0] % 2 == 1 and pixels_per_cell[1]",
"self def iter(self, image=None): '''Next superpixel generator Parameters ---------- image : array_like, optional",
"pixel_stride=None, image=None, mode='constant', cval=0, start_pt=(0, 0), stop_pt=(None, None)): ''' Sliding window iterator. Parameters",
"...])) chunk[min_y:max_y, :min_x, ...] = np.fliplr(np.atleast_2d( # left border chip[:, :min_x, ...])) #",
"# regionprops() treats label zero (0) as unlabeled and ignores it # TODO",
"BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") for rp in properties: if rp._slice",
"over the image. # RE this is more efficient though if self.mode ==",
"create IterateOverOverlappingWindows(IterateOverWindows), which enforces # pixel_stride <= pixels_per_cell # # NOTE if pixel_stride",
"1, \\ 'provide an odd number for pixels_per_cell to easily center the window'",
"namedtuple class Point2D(object): __slots__ = \"x\", \"y\" def __init__(self, x, y): self.x =",
": turned into a class sgr : added mode='reflect' ''' if image is",
"over fields tuple/list style''' for field_name in self.__slots__: yield getattr(self, field_name) def __getitem__(self,",
"...] = np.fliplr(np.atleast_2d( # left border chip[:, :min_x, ...])) # NOTE neg indice",
"the chip (which may exceed the bounds of the image) MODIFICATIONS sgr :",
"numpy as np from collections import namedtuple import skimage.measure #import matplotlib.pyplot as plt",
"filled according to the given mode. Only ``mode='constant'``, ``mode='discard'`` and ``mode='reflect'`` are currently",
"= np.atleast_2d( # bottom-right corner np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...]) chunk[max_y:, :min_x, ...] = np.fliplr(np.atleast_2d(",
"assert pixels_per_cell[0] % 2 == 1 and pixels_per_cell[1] % 2 == 1, \\",
"#print(i, ':\\n', ret[0]) >>> print(tot) # weak test 25000 ''' assert pixels_per_cell[0] %",
"np.atleast_2d( # right border np.fliplr(chip)[:, :ncols_chunk-max_x, ...]) chunk[:min_y, :min_x, ...] = np.fliplr(np.flipud(np.atleast_2d( #",
"the image for r in xrange(ystrides_per_image): for c in xrange(xstrides_per_image): # chip out",
"pixel_stride > pixels_per_cell/2, it is possible to leave data unseen on the #",
"of the image without bounds checking # for additional speedup BoundingBox = namedtuple(\"BoundingBox\",",
"ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot) # weak test 25000 ''' assert",
"0.0 start_pt : array_like, optional (x,y) stop_pt : array_like, optional (x,y) >>> tot",
"numpy.array mask : the binary mask of the window within the chip BoundingBox",
"NoneType\") nrows, ncols = self.image.shape[0:2] # NOTE could iterate over the interior of",
"- pixels_per_half_cell[0] max_x = min_x+self.pixels_per_cell[0] min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y",
"self.__slots__[index]) # NOTE IterateOverWindows and IterateOverSuperpixels must share the same iter() interface #",
"of the input are filled according to the given mode. Only ``mode='constant'``, ``mode='discard'``",
"necessary here and I could simply loop over the image. # RE this",
"RE this is more efficient though if self.mode == 'constant' or self.mode ==",
"can be easily centered pixel_stride : array_like, optional x,y image : array_like, optional",
"additional speedup BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2",
"min(nrows, bbox.max_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) chip = self.image[min_y:max_y, min_x:max_x, ...]",
"#print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) chip = self.image[min_y:max_y, min_x:max_x, ...] # couch",
"'unrecognized mode' # FIXME should bbox be max-1 like in the superpixel version",
"exceed the bounds of the image) MODIFICATIONS sgr : turned into a class",
"of type NoneType\") # regionprops() treats label zero (0) as unlabeled and ignores",
"style''' for field_name in self.__slots__: yield getattr(self, field_name) def __getitem__(self, index): '''tuple/list style",
"matplotlib.pyplot as plt #import ipdb # could maybe turn this into a generic",
"the input are filled according to the given mode. Only ``mode='constant'``, ``mode='discard'`` and",
"if pixel_stride > pixels_per_cell/2, it is possible to leave data unseen on the",
"array_like, optional (x,y) >>> tot = 0; im = np.arange(100).reshape((10,10)) >>> for i,ret",
"bbox.min_x), min(ncols, bbox.max_x) min_y, max_y = max(0, bbox.min_y), min(nrows, bbox.max_y) #print('c=%d'%c, 'r=%d'%r, min_x,",
"for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)): ... tot += ret[0].sum() ... #print(i, ':\\n', ret[0]) >>>",
"min_y = max(0, -min_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) #print() chunk[min_y:max_y, min_x:max_x,",
"...])) # NOTE neg indice trikery (flipping first simplifies indexing) chunk[max_y:, min_x:max_x, ...]",
"- let x,y be odd so the window can be easily centered pixel_stride",
"``mode='reflect'`` are currently supported, although others could be added (e.g., 'nearest' and 'wrap')",
"max_x:, ...] = np.atleast_2d( # right border np.fliplr(chip)[:, :ncols_chunk-max_x, ...]) chunk[:min_y, :min_x, ...]",
"(which may exceed the bounds of the image) MODIFICATIONS sgr : turned into",
"to leave data unseen on the # right/bottom boarder of an image #",
"'provide an odd number for pixels_per_cell to easily center the window' self.pixels_per_cell =",
"Parameters ---------- image : array_like like numpy.array (ndim == 2 or 3) '''",
"= image elif self.image is None: raise TypeError(\"self.image cannot be of type NoneType\")",
"...]) chunk[max_y:, :min_x, ...] = np.fliplr(np.atleast_2d( # bottom-left corner np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...])) elif",
"a class ''' if image is not None: self.image = image elif self.image",
"= chip else: assert False, 'unrecognized mode' # FIXME should bbox be max-1",
"chip BoundingBox bbox : the inclusive extents of the chip (which may exceed",
"Parameters ---------- pixels_per_cell : array_like x,y - let x,y be odd so the",
"xstrides_per_image = self.shape() # iterate around the boarder of the image for r",
"tuple(pixels_per_cell) self.pixel_stride = self.pixels_per_cell if pixel_stride is None else pixel_stride self.image = image",
"wouldn't # be necessary here and I could simply loop over the image.",
"is treated as unlabeled. image : array_like, optional like numpy.array (ndim == 2",
"continue (min_y,min_x,max_y,max_x) = rp.bbox chip = image[min_y:max_y, min_x:max_x,...] mask = rp.filled_image bbox =",
"than it should be... chunk[:min_y, min_x:max_x, ...] = np.flipud(np.atleast_2d( # top border chip[:min_y,",
"used for points outside the boundaries of the input if ``mode='constant'``. Default is",
"== 1, \\ 'provide an odd number for pixels_per_cell to easily center the",
"__getitem__(self, index): '''tuple/list style getitem''' return getattr(self, self.__slots__[index]) # NOTE IterateOverWindows and IterateOverSuperpixels",
"cval=0, start_pt=(0, 0), stop_pt=(None, None)): ''' Sliding window iterator. Parameters ---------- pixels_per_cell :",
"in a fixed-size window # REVIEW I could refactor handling the boarder into",
"remove small, unconnected components properties = skimage.measure.regionprops(self.segmented) BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y",
"ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot) # weak test 22647 >>> tot",
"be easily centered pixel_stride : array_like, optional x,y image : array_like, optional like",
"``mode='discard'`` and ``mode='reflect'`` are currently supported, although others could be added (e.g., 'nearest'",
"this seems harder than it should be... chunk[:min_y, min_x:max_x, ...] = np.flipud(np.atleast_2d( #",
"Parameters ---------- image : array_like like numpy.array (ndim == 2 or 3) Returns",
"self.stop_pt = Point2D(*(stop_pt)) def setImage(self, image): ''' Parameters ---------- image : array_like like",
"optional Points outside the boundaries of the input are filled according to the",
"stop_pt=(None, None)): ''' Sliding window iterator. Parameters ---------- pixels_per_cell : array_like x,y -",
"min_y, max_y = max(0, bbox.min_y), min(nrows, bbox.max_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y)",
"max_x, min_y, max_y) chip = self.image[min_y:max_y, min_x:max_x, ...] # couch chip in a",
"top-left corner chip[:min_y, :min_x, ...]))) chunk[:min_y, max_x:, ...] = np.flipud(np.atleast_2d( # top-right corner",
"= self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min(self.pixels_per_cell[0], ncols - min_x) min_x",
"max_x = min(self.pixels_per_cell[0], ncols - min_x) min_x = max(0, -min_x) min_y = self.start_pt.y",
"bounds of the image) MODIFICATIONS sgr : optimized sgr : turned into a",
"self.image.shape[0:2] # NOTE could iterate over the interior of the image without bounds",
"added (e.g., 'nearest' and 'wrap') cval : float, optional Value used for points",
"and 'wrap') cval : float, optional Value used for points outside the boundaries",
"min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min_x+self.pixels_per_cell[0] min_y = self.start_pt.y",
"bounds checking # for additional speedup BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\")",
"shape(self): if self.image is None: raise TypeError(\"self.image cannot be of type NoneType\") nrows,",
"version yield chunk, mask, bbox class IterateOverSuperpixels(object): def __init__(self, segmented, image=None): self.segmented =",
"treated as unlabeled. image : array_like, optional like numpy.array (ndim == 2 or",
"outside the boundaries of the input are filled according to the given mode.",
"the # right/bottom boarder of an image # # this is similar to",
"min_x:max_x, ...] = np.flipud(np.atleast_2d( # top border chip[:min_y, :, ...])) chunk[min_y:max_y, :min_x, ...]",
"bottom border np.flipud(chip)[:nrows_chunk-max_y, :, ...]) chunk[min_y:max_y, max_x:, ...] = np.atleast_2d( # right border",
"generator Parameters ---------- image : array_like, optional like numpy.array (ndim == 2 or",
"unconnected components properties = skimage.measure.regionprops(self.segmented) BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") for",
"+ ((self.image.shape[2],) if self.image.ndim == 3 else ()), dtype=self.image.dtype.type) chunk[:] = self.cval mask",
"Returns ------- numpy.array, optional chip : pixels within the current window. Points outside",
"((self.image.shape[2],) if self.image.ndim == 3 else ()), dtype=self.image.dtype.type) chunk[:] = self.cval mask =",
"= self.image.shape[0:2] # NOTE could iterate over the interior of the image without",
"iter() interface # TODO create IterateOverOverlappingWindows(IterateOverWindows), which enforces # pixel_stride <= pixels_per_cell #",
"self.segmented = segmented self.image = image ''' Parameters ---------- segmented : array_like Superpixel",
"\"y\" def __init__(self, x, y): self.x = x self.y = y def __iter__(self):",
"2 or 3) mode : str, optional Points outside the boundaries of the",
"= Point2D(*(int(s) for s in start_pt)) self.stop_pt = Point2D(*(stop_pt)) def setImage(self, image): '''",
"\"min_x max_x min_y max_y\") for rp in properties: if rp._slice is None: continue",
"#print(roi_width, roi_height, self.pixel_stride) nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int) ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int) return (nrows, ncols) def",
"IterateOverOverlappingWindows(IterateOverWindows), which enforces # pixel_stride <= pixels_per_cell # # NOTE if pixel_stride >",
"== 2 or 3) ''' self.image = image return self def iter(self, image=None):",
"Parameters ---------- image : array_like, optional like numpy.array (ndim == 2 or 3)",
"chip[:, :min_x, ...])) # NOTE neg indice trikery (flipping first simplifies indexing) chunk[max_y:,",
"max_y = min(self.pixels_per_cell[1], nrows - min_y) min_y = max(0, -min_y) #print('c=%d'%c, 'r=%d'%r, min_x,",
"min_x, max_x, min_y, max_y) #print() chunk[min_y:max_y, min_x:max_x, ...] = chip mask[min_y:max_y, min_x:max_x] =",
"if image is not None: self.image = image elif self.image is None: raise",
"assert False, 'unrecognized mode' # FIXME should bbox be max-1 like in the",
"getattr(self, field_name) def __getitem__(self, index): '''tuple/list style getitem''' return getattr(self, self.__slots__[index]) # NOTE",
"be... chunk[:min_y, min_x:max_x, ...] = np.flipud(np.atleast_2d( # top border chip[:min_y, :, ...])) chunk[min_y:max_y,",
"self.cval = cval self.start_pt = Point2D(*(int(s) for s in start_pt)) self.stop_pt = Point2D(*(stop_pt))",
"setImage(self, image): ''' Parameters ---------- image : array_like like numpy.array (ndim == 2",
"...]) chunk[min_y:max_y, max_x:, ...] = np.atleast_2d( # right border np.fliplr(chip)[:, :ncols_chunk-max_x, ...]) chunk[:min_y,",
"min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min(self.pixels_per_cell[1], nrows - min_y)",
"chunk[min_y:max_y, max_x:, ...] = np.atleast_2d( # right border np.fliplr(chip)[:, :ncols_chunk-max_x, ...]) chunk[:min_y, :min_x,",
"is not None: self.image = image elif self.image is None: raise TypeError(\"self.image cannot",
"(e.g., 'nearest' and 'wrap') cval : float, optional Value used for points outside",
"superpixel version yield chunk, mask, bbox class IterateOverSuperpixels(object): def __init__(self, segmented, image=None): self.segmented",
"for points outside the boundaries of the input if ``mode='constant'``. Default is 0.0",
"window within the chip BoundingBox bbox : the inclusive extents of the chip",
"'constant' or self.mode == 'reflect': chunk = np.empty( self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim",
"could simply loop over the image. # RE this is more efficient though",
"image self.mode = mode self.cval = cval self.start_pt = Point2D(*(int(s) for s in",
"border chip[:, :min_x, ...])) # NOTE neg indice trikery (flipping first simplifies indexing)",
"``mode='constant'``, ``mode='discard'`` and ``mode='reflect'`` are currently supported, although others could be added (e.g.,",
"self.mode == 'reflect': chunk = np.empty( self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim == 3",
"mask[min_y:max_y, min_x:max_x] = 1 if self.mode == 'reflect': nrows_chunk, ncols_chunk = chunk.shape[0:2] #",
"currently supported, although others could be added (e.g., 'nearest' and 'wrap') cval :",
"\\ 'provide an odd number for pixels_per_cell to easily center the window' self.pixels_per_cell",
"---------- image : array_like like numpy.array (ndim == 2 or 3) Returns -------",
"# FIXME should bbox be max-1 like in the superpixel version yield chunk,",
"small, unconnected components properties = skimage.measure.regionprops(self.segmented) BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\")",
"pixel_stride is None else pixel_stride self.image = image self.mode = mode self.cval =",
"(x,y) >>> tot = 0; im = np.arange(100).reshape((10,10)) >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)):",
"()), dtype=self.image.dtype.type) chunk[:] = self.cval mask = np.zeros(self.pixels_per_cell) min_x = self.start_pt.x + self.pixel_stride[0]*c",
"self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min_y+self.pixels_per_cell[1] bbox = BoundingBox(min_x,max_x,min_y,max_y) min_x, max_x",
"segmented, image=None): self.segmented = segmented self.image = image ''' Parameters ---------- segmented :",
"''' Parameters ---------- image : array_like like numpy.array (ndim == 2 or 3)",
"ret[0]) >>> print(tot) # weak test 22647 >>> tot = 0; im =",
"= chip mask[min_y:max_y, min_x:max_x] = 1 if self.mode == 'reflect': nrows_chunk, ncols_chunk =",
"of the image for r in xrange(ystrides_per_image): for c in xrange(xstrides_per_image): # chip",
"border np.flipud(chip)[:nrows_chunk-max_y, :, ...]) chunk[min_y:max_y, max_x:, ...] = np.atleast_2d( # right border np.fliplr(chip)[:,",
"seems harder than it should be... chunk[:min_y, min_x:max_x, ...] = np.flipud(np.atleast_2d( # top",
"self.pixels_per_cell = tuple(pixels_per_cell) self.pixel_stride = self.pixels_per_cell if pixel_stride is None else pixel_stride self.image",
"corner np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...])) elif self.mode == 'discard': mask = np.ones_like(chip) chunk =",
"rp in properties: if rp._slice is None: continue (min_y,min_x,max_y,max_x) = rp.bbox chip =",
"np.fliplr(np.flipud(np.atleast_2d( # top-left corner chip[:min_y, :min_x, ...]))) chunk[:min_y, max_x:, ...] = np.flipud(np.atleast_2d( #",
"pixels_per_cell[1] % 2 == 1, \\ 'provide an odd number for pixels_per_cell to",
"as unlabeled. image : array_like, optional like numpy.array (ndim == 2 or 3)",
"window' self.pixels_per_cell = tuple(pixels_per_cell) self.pixel_stride = self.pixels_per_cell if pixel_stride is None else pixel_stride",
"filled from chip. # this seems harder than it should be... chunk[:min_y, min_x:max_x,",
"dtype=self.image.dtype.type) chunk[:] = self.cval mask = np.zeros(self.pixels_per_cell) min_x = self.start_pt.x + self.pixel_stride[0]*c -",
"leave data unseen on the # right/bottom boarder of an image # #",
"pixel_stride : array_like, optional x,y image : array_like, optional like numpy.array (ndim ==",
"np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int) ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int) return (nrows, ncols) def iter(self,image=None): '''Next window generator Parameters",
"refactor handling the boarder into pad_image(). then mode wouldn't # be necessary here",
"im = np.arange(81).reshape((9,9)).T >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)): ... tot += ret[0].sum() ...",
"test 22647 >>> tot = 0; im = np.arange(81).reshape((9,9)).T >>> for i,ret in",
">>> print(tot) # weak test 25000 ''' assert pixels_per_cell[0] % 2 == 1",
"though if self.mode == 'constant' or self.mode == 'reflect': chunk = np.empty( self.pixels_per_cell",
"Point2D(*(stop_pt)) def setImage(self, image): ''' Parameters ---------- image : array_like like numpy.array (ndim",
"= np.flipud(np.atleast_2d( # top-right corner np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...])) chunk[max_y:, max_x:, ...] = np.atleast_2d(",
"mask : the binary mask of the window within the chip BoundingBox bbox",
"over the interior of the image without bounds checking # for additional speedup",
"to the given mode. numpy.array mask : the binary mask of the window",
"x,y be odd so the window can be easily centered pixel_stride : array_like,",
"ignores it # TODO remove small, unconnected components properties = skimage.measure.regionprops(self.segmented) BoundingBox =",
"class Point2D(object): __slots__ = \"x\", \"y\" def __init__(self, x, y): self.x = x",
"# right border np.fliplr(chip)[:, :ncols_chunk-max_x, ...]) chunk[:min_y, :min_x, ...] = np.fliplr(np.flipud(np.atleast_2d( # top-left",
":min_x, ...])) elif self.mode == 'discard': mask = np.ones_like(chip) chunk = chip else:",
"elif self.mode == 'discard': mask = np.ones_like(chip) chunk = chip else: assert False,",
"25000 ''' assert pixels_per_cell[0] % 2 == 1 and pixels_per_cell[1] % 2 ==",
": turned into a class ''' if image is not None: self.image =",
"self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y = min(self.pixels_per_cell[1], nrows - min_y) min_y = max(0, -min_y)",
"array_like x,y - let x,y be odd so the window can be easily",
"self.pixel_stride) nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int) ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int) return (nrows, ncols) def iter(self,image=None): '''Next",
"image. # RE this is more efficient though if self.mode == 'constant' or",
"= namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") for rp in properties: if rp._slice is",
"self def shape(self): if self.image is None: raise TypeError(\"self.image cannot be of type",
"max_x:, ...] = np.atleast_2d( # bottom-right corner np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...]) chunk[max_y:, :min_x, ...]",
"the boarder into pad_image(). then mode wouldn't # be necessary here and I",
"= self.cval mask = np.zeros(self.pixels_per_cell) min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x",
"cannot be of type NoneType\") nrows, ncols = self.image.shape[0:2] stop_x = ncols if",
"in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)): ... tot += ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot) #",
"== 'discard': mask = np.ones_like(chip) chunk = chip else: assert False, 'unrecognized mode'",
"ystrides_per_image, xstrides_per_image = self.shape() # iterate around the boarder of the image for",
"the image) MODIFICATIONS sgr : turned into a class sgr : added mode='reflect'",
"np.ones_like(chip) chunk = chip else: assert False, 'unrecognized mode' # FIXME should bbox",
"a class sgr : added mode='reflect' ''' if image is not None: self.image",
"efficient though if self.mode == 'constant' or self.mode == 'reflect': chunk = np.empty(",
"chunk[:min_y, min_x:max_x, ...] = np.flipud(np.atleast_2d( # top border chip[:min_y, :, ...])) chunk[min_y:max_y, :min_x,",
"nrows, ncols = self.image.shape[0:2] stop_x = ncols if self.stop_pt.x is None else int(self.stop_pt.x)",
":min_x, ...] = np.fliplr(np.atleast_2d( # bottom-left corner np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...])) elif self.mode ==",
"None else int(self.stop_pt.x) stop_y = nrows if self.stop_pt.y is None else int(self.stop_pt.y) roi_height",
": optimized sgr : turned into a class ''' if image is not",
"plt #import ipdb # could maybe turn this into a generic mutable namedtuple",
"array_like, optional x,y image : array_like, optional like numpy.array (ndim == 2 or",
"# top-right corner np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...])) chunk[max_y:, max_x:, ...] = np.atleast_2d( # bottom-right",
"self.pixels_per_cell[1]//2 ystrides_per_image, xstrides_per_image = self.shape() # iterate around the boarder of the image",
"Point2D(*(int(s) for s in start_pt)) self.stop_pt = Point2D(*(stop_pt)) def setImage(self, image): ''' Parameters",
"assume the points outside the boundaries of input can be filled from chip.",
"...] = np.atleast_2d( # bottom border np.flipud(chip)[:nrows_chunk-max_y, :, ...]) chunk[min_y:max_y, max_x:, ...] =",
"self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min(self.pixels_per_cell[0], ncols - min_x) min_x =",
"bottom-right corner np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...]) chunk[max_y:, :min_x, ...] = np.fliplr(np.atleast_2d( # bottom-left corner",
"self.image = image ''' Parameters ---------- segmented : array_like Superpixel labeled segmentation (like",
"type NoneType\") # regionprops() treats label zero (0) as unlabeled and ignores it",
"np.fliplr(np.atleast_2d( # bottom-left corner np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...])) elif self.mode == 'discard': mask =",
"properties = skimage.measure.regionprops(self.segmented) BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") for rp in",
"= np.fliplr(np.flipud(np.atleast_2d( # top-left corner chip[:min_y, :min_x, ...]))) chunk[:min_y, max_x:, ...] = np.flipud(np.atleast_2d(",
"None else pixel_stride self.image = image self.mode = mode self.cval = cval self.start_pt",
"image for r in xrange(ystrides_per_image): for c in xrange(xstrides_per_image): # chip out pixels",
"interface # TODO create IterateOverOverlappingWindows(IterateOverWindows), which enforces # pixel_stride <= pixels_per_cell # #",
"boundaries of input can be filled from chip. # this seems harder than",
"and ignores it # TODO remove small, unconnected components properties = skimage.measure.regionprops(self.segmented) BoundingBox",
"image) MODIFICATIONS sgr : turned into a class sgr : added mode='reflect' '''",
"- min_y) min_y = max(0, -min_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) #print()",
"segmentation (like numpy.array) NOTE regionprops expects labels to be sequential and start at",
"mode wouldn't # be necessary here and I could simply loop over the",
"boundaries of the input are filled according to the given mode. numpy.array mask",
"namedtuple import skimage.measure #import matplotlib.pyplot as plt #import ipdb # could maybe turn",
"min_y max_y\") for rp in properties: if rp._slice is None: continue (min_y,min_x,max_y,max_x) =",
"pixels_per_half_cell[0] max_x = min_x+self.pixels_per_cell[0] min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1] max_y =",
"None: raise TypeError(\"self.image cannot be of type NoneType\") nrows, ncols = self.image.shape[0:2] stop_x",
"and pixels_per_cell[1] % 2 == 1, \\ 'provide an odd number for pixels_per_cell",
"number for pixels_per_cell to easily center the window' self.pixels_per_cell = tuple(pixels_per_cell) self.pixel_stride =",
"the boundaries of input can be filled from chip. # this seems harder",
"and I could simply loop over the image. # RE this is more",
"'''Next superpixel generator Parameters ---------- image : array_like, optional like numpy.array (ndim ==",
"...] = np.fliplr(np.atleast_2d( # bottom-left corner np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...])) elif self.mode == 'discard':",
": array_like, optional (x,y) stop_pt : array_like, optional (x,y) >>> tot = 0;",
"self.image = image elif self.image is None: raise TypeError(\"self.image cannot be of type",
"':\\n', ret[0]) >>> print(tot) # weak test 22647 >>> tot = 0; im",
"image : array_like like numpy.array (ndim == 2 or 3) Returns ------- numpy.array,",
"'''tuple/list style getitem''' return getattr(self, self.__slots__[index]) # NOTE IterateOverWindows and IterateOverSuperpixels must share",
"numpy.array (ndim == 2 or 3) mode : str, optional Points outside the",
"self.image is None: raise TypeError(\"self.image cannot be of type NoneType\") nrows, ncols =",
"namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2 ystrides_per_image, xstrides_per_image = self.shape()",
"chip else: assert False, 'unrecognized mode' # FIXME should bbox be max-1 like",
"#import matplotlib.pyplot as plt #import ipdb # could maybe turn this into a",
"elif self.image is None: raise TypeError(\"self.image cannot be of type NoneType\") nrows, ncols",
"iterate over the interior of the image without bounds checking # for additional",
"it should be... chunk[:min_y, min_x:max_x, ...] = np.flipud(np.atleast_2d( # top border chip[:min_y, :,",
"type NoneType\") nrows, ncols = self.image.shape[0:2] # NOTE could iterate over the interior",
"first simplifies indexing) chunk[max_y:, min_x:max_x, ...] = np.atleast_2d( # bottom border np.flipud(chip)[:nrows_chunk-max_y, :,",
"min_x, max_x, min_y, max_y) chip = self.image[min_y:max_y, min_x:max_x, ...] # couch chip in",
"= 0; im = np.arange(100).reshape((10,10)) >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)): ... tot +=",
"np.flipud(chip)[:nrows_chunk-max_y, :, ...]) chunk[min_y:max_y, max_x:, ...] = np.atleast_2d( # right border np.fliplr(chip)[:, :ncols_chunk-max_x,",
"array_like, optional like numpy.array (ndim == 2 or 3) mode : str, optional",
"== 'reflect': nrows_chunk, ncols_chunk = chunk.shape[0:2] # NOTE assume the points outside the",
"are currently supported, although others could be added (e.g., 'nearest' and 'wrap') cval",
"= np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int) return (nrows, ncols) def iter(self,image=None): '''Next window generator Parameters ---------- image",
"exceed the bounds of the image) MODIFICATIONS sgr : optimized sgr : turned",
"self.pixels_per_cell if pixel_stride is None else pixel_stride self.image = image self.mode = mode",
"skimage.measure.regionprops(self.segmented) BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") for rp in properties: if",
"'r=%d'%r, min_x, max_x, min_y, max_y) chip = self.image[min_y:max_y, min_x:max_x, ...] # couch chip",
"y): self.x = x self.y = y def __iter__(self): '''iterate over fields tuple/list",
"rp.bbox chip = image[min_y:max_y, min_x:max_x,...] mask = rp.filled_image bbox = BoundingBox(min_x,max_x-1,min_y,max_y-1) yield (chip,",
"i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)): ... tot += ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot)",
"the image. # RE this is more efficient though if self.mode == 'constant'",
"pixels in this sliding window min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x",
"self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min_x+self.pixels_per_cell[0] min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1]",
"# left border chip[:, :min_x, ...])) # NOTE neg indice trikery (flipping first",
"left border chip[:, :min_x, ...])) # NOTE neg indice trikery (flipping first simplifies",
"treats label zero (0) as unlabeled and ignores it # TODO remove small,",
"be added (e.g., 'nearest' and 'wrap') cval : float, optional Value used for",
"rp._slice is None: continue (min_y,min_x,max_y,max_x) = rp.bbox chip = image[min_y:max_y, min_x:max_x,...] mask =",
"given mode. Only ``mode='constant'``, ``mode='discard'`` and ``mode='reflect'`` are currently supported, although others could",
"elif self.image is None: raise TypeError(\"self.image cannot be of type NoneType\") # regionprops()",
"np.zeros(self.pixels_per_cell) min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min(self.pixels_per_cell[0], ncols -",
"window. Points outside the boundaries of the input are filled according to the",
"others could be added (e.g., 'nearest' and 'wrap') cval : float, optional Value",
"center the window' self.pixels_per_cell = tuple(pixels_per_cell) self.pixel_stride = self.pixels_per_cell if pixel_stride is None",
"inclusive extents of the chip (which may exceed the bounds of the image)",
"is None: continue (min_y,min_x,max_y,max_x) = rp.bbox chip = image[min_y:max_y, min_x:max_x,...] mask = rp.filled_image",
"= ncols if self.stop_pt.x is None else int(self.stop_pt.x) stop_y = nrows if self.stop_pt.y",
"Points outside the boundaries of the input are filled according to the given",
"+ self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min_x+self.pixels_per_cell[0] min_y = self.start_pt.y + self.pixel_stride[1]*r -",
"should be... chunk[:min_y, min_x:max_x, ...] = np.flipud(np.atleast_2d( # top border chip[:min_y, :, ...]))",
"``mode='constant'``. Default is 0.0 start_pt : array_like, optional (x,y) stop_pt : array_like, optional",
"the window' self.pixels_per_cell = tuple(pixels_per_cell) self.pixel_stride = self.pixels_per_cell if pixel_stride is None else",
"chunk, mask, bbox class IterateOverSuperpixels(object): def __init__(self, segmented, image=None): self.segmented = segmented self.image",
"IterateOverSuperpixels(object): def __init__(self, segmented, image=None): self.segmented = segmented self.image = image ''' Parameters",
"be odd so the window can be easily centered pixel_stride : array_like, optional",
"top-right corner np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...])) chunk[max_y:, max_x:, ...] = np.atleast_2d( # bottom-right corner",
"as plt #import ipdb # could maybe turn this into a generic mutable",
"Point2D(object): __slots__ = \"x\", \"y\" def __init__(self, x, y): self.x = x self.y",
"binary mask of the window within the chip BoundingBox bbox : the inclusive",
"generator Parameters ---------- image : array_like like numpy.array (ndim == 2 or 3)",
"...] # couch chip in a fixed-size window # REVIEW I could refactor",
"are filled according to the given mode. numpy.array mask : the binary mask",
": the inclusive extents of the chip (which may exceed the bounds of",
"''' def setImage(self, image): ''' Parameters ---------- image : array_like like numpy.array (ndim",
"...] = np.atleast_2d( # right border np.fliplr(chip)[:, :ncols_chunk-max_x, ...]) chunk[:min_y, :min_x, ...] =",
"(flipping first simplifies indexing) chunk[max_y:, min_x:max_x, ...] = np.atleast_2d( # bottom border np.flipud(chip)[:nrows_chunk-max_y,",
":, ...]) chunk[min_y:max_y, max_x:, ...] = np.atleast_2d( # right border np.fliplr(chip)[:, :ncols_chunk-max_x, ...])",
">>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)): ... tot += ret[0].sum() ... #print(i, ':\\n', ret[0])",
"NoneType\") nrows, ncols = self.image.shape[0:2] stop_x = ncols if self.stop_pt.x is None else",
"nrows if self.stop_pt.y is None else int(self.stop_pt.y) roi_height = stop_y-self.start_pt.y roi_width = stop_x-self.start_pt.x",
"IterateOverSuperpixels must share the same iter() interface # TODO create IterateOverOverlappingWindows(IterateOverWindows), which enforces",
"= max(0, -min_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) #print() chunk[min_y:max_y, min_x:max_x, ...]",
"cannot be of type NoneType\") # regionprops() treats label zero (0) as unlabeled",
"min_y) min_y = max(0, -min_y) #print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y) #print() chunk[min_y:max_y,",
"easily centered pixel_stride : array_like, optional x,y image : array_like, optional like numpy.array",
"...]))) chunk[:min_y, max_x:, ...] = np.flipud(np.atleast_2d( # top-right corner np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...])) chunk[max_y:,",
"__iter__(self): '''iterate over fields tuple/list style''' for field_name in self.__slots__: yield getattr(self, field_name)",
"of the image) MODIFICATIONS sgr : optimized sgr : turned into a class",
"(ndim == 2 or 3) ''' def setImage(self, image): ''' Parameters ---------- image",
"NOTE if pixel_stride > pixels_per_cell/2, it is possible to leave data unseen on",
"in xrange(xstrides_per_image): # chip out pixels in this sliding window min_x = self.start_pt.x",
"namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") for rp in properties: if rp._slice is None:",
"+= ret[0].sum() ... #print(i, ':\\n', ret[0]) >>> print(tot) # weak test 25000 '''",
"= np.arange(81).reshape((9,9)).T >>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)): ... tot += ret[0].sum() ... #print(i,",
"to easily center the window' self.pixels_per_cell = tuple(pixels_per_cell) self.pixel_stride = self.pixels_per_cell if pixel_stride",
"roi_width = stop_x-self.start_pt.x #print(roi_width, roi_height, self.pixel_stride) nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int) ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int) return",
": array_like, optional like numpy.array (ndim == 2 or 3) mode : str,",
"numpy.array (ndim == 2 or 3) ''' self.image = image return self def",
"sgr : turned into a class ''' if image is not None: self.image",
"x self.y = y def __iter__(self): '''iterate over fields tuple/list style''' for field_name",
"+ self.pixel_stride[0]*c - pixels_per_half_cell[0] max_x = min(self.pixels_per_cell[0], ncols - min_x) min_x = max(0,",
"ncols if self.stop_pt.x is None else int(self.stop_pt.x) stop_y = nrows if self.stop_pt.y is",
"the superpixel version yield chunk, mask, bbox class IterateOverSuperpixels(object): def __init__(self, segmented, image=None):",
"given mode. numpy.array mask : the binary mask of the window within the",
"sequential and start at 1: {1,2,...}. label 0 is treated as unlabeled. image",
"return getattr(self, self.__slots__[index]) # NOTE IterateOverWindows and IterateOverSuperpixels must share the same iter()",
"# right/bottom boarder of an image # # this is similar to matlab's",
"= stop_x-self.start_pt.x #print(roi_width, roi_height, self.pixel_stride) nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int) ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int) return (nrows,",
"chunk[:] = self.cval mask = np.zeros(self.pixels_per_cell) min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0]",
"be filled from chip. # this seems harder than it should be... chunk[:min_y,",
"= np.fliplr(np.atleast_2d( # left border chip[:, :min_x, ...])) # NOTE neg indice trikery",
"# bottom-right corner np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...]) chunk[max_y:, :min_x, ...] = np.fliplr(np.atleast_2d( # bottom-left",
"---------- image : array_like, optional like numpy.array (ndim == 2 or 3) Returns",
"if self.mode == 'constant' or self.mode == 'reflect': chunk = np.empty( self.pixels_per_cell +",
"may exceed the bounds of the image) MODIFICATIONS sgr : optimized sgr :",
"TODO create IterateOverOverlappingWindows(IterateOverWindows), which enforces # pixel_stride <= pixels_per_cell # # NOTE if",
"image): ''' Parameters ---------- image : array_like like numpy.array (ndim == 2 or",
"x, y): self.x = x self.y = y def __iter__(self): '''iterate over fields",
": added mode='reflect' ''' if image is not None: self.image = image elif",
"def __init__(self, pixels_per_cell, pixel_stride=None, image=None, mode='constant', cval=0, start_pt=(0, 0), stop_pt=(None, None)): ''' Sliding",
"max_y = min_y+self.pixels_per_cell[1] bbox = BoundingBox(min_x,max_x,min_y,max_y) min_x, max_x = max(0, bbox.min_x), min(ncols, bbox.max_x)",
"# # NOTE if pixel_stride > pixels_per_cell/2, it is possible to leave data",
"the boundaries of the input are filled according to the given mode. Only",
"def __iter__(self): '''iterate over fields tuple/list style''' for field_name in self.__slots__: yield getattr(self,",
"= tuple(pixels_per_cell) self.pixel_stride = self.pixels_per_cell if pixel_stride is None else pixel_stride self.image =",
"# NOTE assume the points outside the boundaries of input can be filled",
"def iter(self, image=None): '''Next superpixel generator Parameters ---------- image : array_like, optional like",
"= rp.bbox chip = image[min_y:max_y, min_x:max_x,...] mask = rp.filled_image bbox = BoundingBox(min_x,max_x-1,min_y,max_y-1) yield",
"extents of the chip (which may exceed the bounds of the image) MODIFICATIONS",
"== 3 else ()), dtype=self.image.dtype.type) chunk[:] = self.cval mask = np.zeros(self.pixels_per_cell) min_x =",
"input if ``mode='constant'``. Default is 0.0 start_pt : array_like, optional (x,y) stop_pt :",
"window # REVIEW I could refactor handling the boarder into pad_image(). then mode",
"iterate around the boarder of the image for r in xrange(ystrides_per_image): for c",
"image ''' Parameters ---------- segmented : array_like Superpixel labeled segmentation (like numpy.array) NOTE",
"3 else ()), dtype=self.image.dtype.type) chunk[:] = self.cval mask = np.zeros(self.pixels_per_cell) min_x = self.start_pt.x",
"self.image is None: raise TypeError(\"self.image cannot be of type NoneType\") # regionprops() treats",
"# REVIEW I could refactor handling the boarder into pad_image(). then mode wouldn't",
"and IterateOverSuperpixels must share the same iter() interface # TODO create IterateOverOverlappingWindows(IterateOverWindows), which",
"IterateOverWindows and IterateOverSuperpixels must share the same iter() interface # TODO create IterateOverOverlappingWindows(IterateOverWindows),",
"like in the superpixel version yield chunk, mask, bbox class IterateOverSuperpixels(object): def __init__(self,",
"__init__(self, segmented, image=None): self.segmented = segmented self.image = image ''' Parameters ---------- segmented",
": array_like x,y - let x,y be odd so the window can be",
"max_x:, ...] = np.flipud(np.atleast_2d( # top-right corner np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...])) chunk[max_y:, max_x:, ...]",
"# could maybe turn this into a generic mutable namedtuple class Point2D(object): __slots__",
"0 is treated as unlabeled. image : array_like, optional like numpy.array (ndim ==",
"image return self def iter(self, image=None): '''Next superpixel generator Parameters ---------- image :",
"BoundingBox = namedtuple(\"BoundingBox\", \"min_x max_x min_y max_y\") pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2 ystrides_per_image, xstrides_per_image",
"= 1 if self.mode == 'reflect': nrows_chunk, ncols_chunk = chunk.shape[0:2] # NOTE assume",
"unseen on the # right/bottom boarder of an image # # this is",
"the interior of the image without bounds checking # for additional speedup BoundingBox",
"not None: self.image = image elif self.image is None: raise TypeError(\"self.image cannot be",
"the image without bounds checking # for additional speedup BoundingBox = namedtuple(\"BoundingBox\", \"min_x",
"be max-1 like in the superpixel version yield chunk, mask, bbox class IterateOverSuperpixels(object):",
"self.start_pt = Point2D(*(int(s) for s in start_pt)) self.stop_pt = Point2D(*(stop_pt)) def setImage(self, image):",
"loop over the image. # RE this is more efficient though if self.mode",
": array_like like numpy.array (ndim == 2 or 3) ''' self.image = image"
] |
[
"[] if servo.is_v2: dut_control_on.append([ 'spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', ]) dut_control_off.append([ 'spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off',",
"commands were based off the commands for other boards. programmer = 'raiden_debug_spi:target=AP,serial=%s' %",
"the voltage for Vref, to turn on and turn off the SPI flash.",
"1.8 V. Args: servo (servo_lib.Servo): The servo connected to the target DUT. Returns:",
"['flashrom', '-p', programmer, '-w'] futility_cmd = ['futility', 'update', '-p', programmer, '-i'] return [dut_control_on,",
"dut_control_on = [] dut_control_off = [] if servo.is_v2: dut_control_on.append([ 'spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on',",
"like [[\"cmd1\", \"arg1\", \"arg2\"], [\"cmd2\", \"arg3\", \"arg4\"]] where cmd1 will be run before",
"source code is governed by a BSD-style license that can be # found",
"of this source code is governed by a BSD-style license that can be",
"verification of the flash and allows these configs to flash properly. Meant to",
"'spi2_buf_on_flex_en:off', 'cold_reset:off', ]) programmer = 'ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on']) dut_control_off.append(['spi2_vref:off',",
"futility \"\"\" dut_control_on = [] dut_control_off = [] if servo.is_v2: dut_control_on.append([ 'spi2_vref:pp1800', 'spi2_buf_en:on',",
"temporary hack until b/143240576 is fixed. Args: use_futility (bool): True if futility is",
"is fixed. Args: use_futility (bool): True if futility is to be used, False",
"flash via futility \"\"\" dut_control_on = [] dut_control_off = [] if servo.is_v2: dut_control_on.append([",
"use_futility (bool): True if futility is to be used, False if flashrom. servo",
"found in the LICENSE file. \"\"\"Grunt configs.\"\"\" from __future__ import print_function def is_fast_required(use_futility,",
"--fast removes verification of the flash and allows these configs to flash properly.",
"'-w'] futility_cmd = ['futility', 'update', '-p', programmer, '-i'] return [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]",
"a BSD-style license that can be # found in the LICENSE file. \"\"\"Grunt",
"BSD-style license that can be # found in the LICENSE file. \"\"\"Grunt configs.\"\"\"",
"file. \"\"\"Grunt configs.\"\"\" from __future__ import print_function def is_fast_required(use_futility, servo): \"\"\"Returns true if",
"properly. Meant to be a temporary hack until b/143240576 is fixed. Args: use_futility",
"Note nothing listed for flashing with ccd_cr50 on go/grunt-care. # These commands were",
"being used. Returns: bool: True if fast is necessary, False otherwise. \"\"\" return",
"servo.serial else: raise Exception('%s not supported' % servo.version) flashrom_cmd = ['flashrom', '-p', programmer,",
"to be used, False if flashrom. servo (servo_lib.Servo): The type name of the",
"be run before cmd2 flashrom_cmd=command to flash via flashrom futility_cmd=command to flash via",
"if --fast is necessary to flash successfully. The configurations in this function consistently",
"commands including the voltage for Vref, to turn on and turn off the",
"\"\"\"Grunt configs.\"\"\" from __future__ import print_function def is_fast_required(use_futility, servo): \"\"\"Returns true if --fast",
"SPI flash. The get_*_commands() functions provide a board-specific set of commands for these",
"The type name of the servo device being used. Returns: bool: True if",
"be used, False if flashrom. servo (servo_lib.Servo): The type name of the servo",
"run before cmd2 flashrom_cmd=command to flash via flashrom futility_cmd=command to flash via futility",
"voltage for this board needs to be set to 1.8 V. Args: servo",
"for this board needs to be set to 1.8 V. Args: servo (servo_lib.Servo):",
"configs.\"\"\" from __future__ import print_function def is_fast_required(use_futility, servo): \"\"\"Returns true if --fast is",
"off the SPI flash. The get_*_commands() functions provide a board-specific set of commands",
"turn off the SPI flash. The get_*_commands() functions provide a board-specific set of",
"hack until b/143240576 is fixed. Args: use_futility (bool): True if futility is to",
"flash commands for grunt Each board needs specific commands including the voltage for",
"dut_control_off.append([ 'spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', ]) programmer = 'ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial elif servo.is_micro:",
"off the commands for other boards. programmer = 'raiden_debug_spi:target=AP,serial=%s' % servo.serial else: raise",
"nothing listed for flashing with ccd_cr50 on go/grunt-care. # These commands were based",
"this source code is governed by a BSD-style license that can be #",
"if servo.is_v2: dut_control_on.append([ 'spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', ]) dut_control_off.append([ 'spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off',",
"functions provide a board-specific set of commands for these tasks. The voltage for",
"needs specific commands including the voltage for Vref, to turn on and turn",
"commands for other boards. programmer = 'raiden_debug_spi:target=AP,serial=%s' % servo.serial else: raise Exception('%s not",
"= 'raiden_debug_spi:target=AP,serial=%s' % servo.serial else: raise Exception('%s not supported' % servo.version) flashrom_cmd =",
"The get_*_commands() functions provide a board-specific set of commands for these tasks. The",
"dut_control_off = [] if servo.is_v2: dut_control_on.append([ 'spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', ]) dut_control_off.append([ 'spi2_vref:off',",
"Exception('%s not supported' % servo.version) flashrom_cmd = ['flashrom', '-p', programmer, '-w'] futility_cmd =",
"Returns: bool: True if fast is necessary, False otherwise. \"\"\" return use_futility and",
"board needs to be set to 1.8 V. Args: servo (servo_lib.Servo): The servo",
"necessary to flash successfully. The configurations in this function consistently fail on the",
"board-specific set of commands for these tasks. The voltage for this board needs",
"connected to the target DUT. Returns: list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd] dut_control*=2d arrays",
"provide a board-specific set of commands for these tasks. The voltage for this",
"def get_commands(servo): \"\"\"Get specific flash commands for grunt Each board needs specific commands",
"-*- coding: utf-8 -*- # Copyright 2019 The Chromium OS Authors. All rights",
"can be # found in the LICENSE file. \"\"\"Grunt configs.\"\"\" from __future__ import",
"= [] dut_control_off = [] if servo.is_v2: dut_control_on.append([ 'spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', ])",
"'raiden_debug_spi:serial=%s' % servo.serial elif servo.is_ccd: # Note nothing listed for flashing with ccd_cr50",
"cmd2 flashrom_cmd=command to flash via flashrom futility_cmd=command to flash via futility \"\"\" dut_control_on",
"for grunt Each board needs specific commands including the voltage for Vref, to",
"a board-specific set of commands for these tasks. The voltage for this board",
"configurations in this function consistently fail on the verify step, adding --fast removes",
"fast is necessary, False otherwise. \"\"\" return use_futility and servo.is_v4 def get_commands(servo): \"\"\"Get",
"grunt Each board needs specific commands including the voltage for Vref, to turn",
"the SPI flash. The get_*_commands() functions provide a board-specific set of commands for",
"__future__ import print_function def is_fast_required(use_futility, servo): \"\"\"Returns true if --fast is necessary to",
"flash successfully. The configurations in this function consistently fail on the verify step,",
"is necessary, False otherwise. \"\"\" return use_futility and servo.is_v4 def get_commands(servo): \"\"\"Get specific",
"otherwise. \"\"\" return use_futility and servo.is_v4 def get_commands(servo): \"\"\"Get specific flash commands for",
"Returns: list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd] dut_control*=2d arrays formmated like [[\"cmd1\", \"arg1\", \"arg2\"],",
"is necessary to flash successfully. The configurations in this function consistently fail on",
"futility_cmd=command to flash via futility \"\"\" dut_control_on = [] dut_control_off = [] if",
"'ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off']) programmer = 'raiden_debug_spi:serial=%s' %",
"based off the commands for other boards. programmer = 'raiden_debug_spi:target=AP,serial=%s' % servo.serial else:",
"flashrom_cmd = ['flashrom', '-p', programmer, '-w'] futility_cmd = ['futility', 'update', '-p', programmer, '-i']",
"fixed. Args: use_futility (bool): True if futility is to be used, False if",
"'spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', ]) dut_control_off.append([ 'spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', ]) programmer =",
"'spi2_buf_on_flex_en:on', 'cold_reset:on', ]) dut_control_off.append([ 'spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', ]) programmer = 'ft2232_spi:type=google-servo-v2,serial=%s' %",
"dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off']) programmer = 'raiden_debug_spi:serial=%s' % servo.serial elif servo.is_ccd: # Note nothing listed",
"LICENSE file. \"\"\"Grunt configs.\"\"\" from __future__ import print_function def is_fast_required(use_futility, servo): \"\"\"Returns true",
"the flash and allows these configs to flash properly. Meant to be a",
"these tasks. The voltage for this board needs to be set to 1.8",
"go/grunt-care. # These commands were based off the commands for other boards. programmer",
"removes verification of the flash and allows these configs to flash properly. Meant",
"\"\"\"Returns true if --fast is necessary to flash successfully. The configurations in this",
"type name of the servo device being used. Returns: bool: True if fast",
"coding: utf-8 -*- # Copyright 2019 The Chromium OS Authors. All rights reserved.",
"def is_fast_required(use_futility, servo): \"\"\"Returns true if --fast is necessary to flash successfully. The",
"\"\"\" return use_futility and servo.is_v4 def get_commands(servo): \"\"\"Get specific flash commands for grunt",
"'spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', ]) programmer = 'ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800',",
"import print_function def is_fast_required(use_futility, servo): \"\"\"Returns true if --fast is necessary to flash",
"name of the servo device being used. Returns: bool: True if fast is",
"commands for these tasks. The voltage for this board needs to be set",
"'-p', programmer, '-w'] futility_cmd = ['futility', 'update', '-p', programmer, '-i'] return [dut_control_on, dut_control_off,",
"a temporary hack until b/143240576 is fixed. Args: use_futility (bool): True if futility",
"to 1.8 V. Args: servo (servo_lib.Servo): The servo connected to the target DUT.",
"the target DUT. Returns: list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd] dut_control*=2d arrays formmated like",
"set of commands for these tasks. The voltage for this board needs to",
"Authors. All rights reserved. # Use of this source code is governed by",
"set to 1.8 V. Args: servo (servo_lib.Servo): The servo connected to the target",
"flash properly. Meant to be a temporary hack until b/143240576 is fixed. Args:",
"target DUT. Returns: list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd] dut_control*=2d arrays formmated like [[\"cmd1\",",
"that can be # found in the LICENSE file. \"\"\"Grunt configs.\"\"\" from __future__",
"formmated like [[\"cmd1\", \"arg1\", \"arg2\"], [\"cmd2\", \"arg3\", \"arg4\"]] where cmd1 will be run",
"flash. The get_*_commands() functions provide a board-specific set of commands for these tasks.",
"dut_control_on.append([ 'spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', ]) dut_control_off.append([ 'spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', ]) programmer",
"supported' % servo.version) flashrom_cmd = ['flashrom', '-p', programmer, '-w'] futility_cmd = ['futility', 'update',",
"from __future__ import print_function def is_fast_required(use_futility, servo): \"\"\"Returns true if --fast is necessary",
"to turn on and turn off the SPI flash. The get_*_commands() functions provide",
"used, False if flashrom. servo (servo_lib.Servo): The type name of the servo device",
"these configs to flash properly. Meant to be a temporary hack until b/143240576",
"where cmd1 will be run before cmd2 flashrom_cmd=command to flash via flashrom futility_cmd=command",
"= ['flashrom', '-p', programmer, '-w'] futility_cmd = ['futility', 'update', '-p', programmer, '-i'] return",
"The Chromium OS Authors. All rights reserved. # Use of this source code",
"Use of this source code is governed by a BSD-style license that can",
"the commands for other boards. programmer = 'raiden_debug_spi:target=AP,serial=%s' % servo.serial else: raise Exception('%s",
"to flash via flashrom futility_cmd=command to flash via futility \"\"\" dut_control_on = []",
"is_fast_required(use_futility, servo): \"\"\"Returns true if --fast is necessary to flash successfully. The configurations",
"= [] if servo.is_v2: dut_control_on.append([ 'spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', ]) dut_control_off.append([ 'spi2_vref:off', 'spi2_buf_en:off',",
"% servo.serial elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off']) programmer = 'raiden_debug_spi:serial=%s' % servo.serial",
"board needs specific commands including the voltage for Vref, to turn on and",
"to flash successfully. The configurations in this function consistently fail on the verify",
"'spi2_buf_en:off']) programmer = 'raiden_debug_spi:serial=%s' % servo.serial elif servo.is_ccd: # Note nothing listed for",
"dut_control*=2d arrays formmated like [[\"cmd1\", \"arg1\", \"arg2\"], [\"cmd2\", \"arg3\", \"arg4\"]] where cmd1 will",
"else: raise Exception('%s not supported' % servo.version) flashrom_cmd = ['flashrom', '-p', programmer, '-w']",
"adding --fast removes verification of the flash and allows these configs to flash",
"to be a temporary hack until b/143240576 is fixed. Args: use_futility (bool): True",
"get_commands(servo): \"\"\"Get specific flash commands for grunt Each board needs specific commands including",
"'cold_reset:off', ]) programmer = 'ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off'])",
"servo.is_ccd: # Note nothing listed for flashing with ccd_cr50 on go/grunt-care. # These",
"True if fast is necessary, False otherwise. \"\"\" return use_futility and servo.is_v4 def",
"of the servo device being used. Returns: bool: True if fast is necessary,",
"\"arg1\", \"arg2\"], [\"cmd2\", \"arg3\", \"arg4\"]] where cmd1 will be run before cmd2 flashrom_cmd=command",
"to flash via futility \"\"\" dut_control_on = [] dut_control_off = [] if servo.is_v2:",
"necessary, False otherwise. \"\"\" return use_futility and servo.is_v4 def get_commands(servo): \"\"\"Get specific flash",
"flashrom_cmd=command to flash via flashrom futility_cmd=command to flash via futility \"\"\" dut_control_on =",
"commands for grunt Each board needs specific commands including the voltage for Vref,",
"--fast is necessary to flash successfully. The configurations in this function consistently fail",
"Meant to be a temporary hack until b/143240576 is fixed. Args: use_futility (bool):",
"until b/143240576 is fixed. Args: use_futility (bool): True if futility is to be",
"Args: servo (servo_lib.Servo): The servo connected to the target DUT. Returns: list: [dut_control_on,",
"arrays formmated like [[\"cmd1\", \"arg1\", \"arg2\"], [\"cmd2\", \"arg3\", \"arg4\"]] where cmd1 will be",
"list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd] dut_control*=2d arrays formmated like [[\"cmd1\", \"arg1\", \"arg2\"], [\"cmd2\",",
"bool: True if fast is necessary, False otherwise. \"\"\" return use_futility and servo.is_v4",
"= 'ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off']) programmer = 'raiden_debug_spi:serial=%s'",
"elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off']) programmer = 'raiden_debug_spi:serial=%s' % servo.serial elif servo.is_ccd:",
"function consistently fail on the verify step, adding --fast removes verification of the",
"code is governed by a BSD-style license that can be # found in",
"(servo_lib.Servo): The servo connected to the target DUT. Returns: list: [dut_control_on, dut_control_off, flashrom_cmd,",
"before cmd2 flashrom_cmd=command to flash via flashrom futility_cmd=command to flash via futility \"\"\"",
"\"arg3\", \"arg4\"]] where cmd1 will be run before cmd2 flashrom_cmd=command to flash via",
"listed for flashing with ccd_cr50 on go/grunt-care. # These commands were based off",
"rights reserved. # Use of this source code is governed by a BSD-style",
"step, adding --fast removes verification of the flash and allows these configs to",
"of the flash and allows these configs to flash properly. Meant to be",
"[\"cmd2\", \"arg3\", \"arg4\"]] where cmd1 will be run before cmd2 flashrom_cmd=command to flash",
"\"arg4\"]] where cmd1 will be run before cmd2 flashrom_cmd=command to flash via flashrom",
"for Vref, to turn on and turn off the SPI flash. The get_*_commands()",
"]) dut_control_off.append([ 'spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', ]) programmer = 'ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial elif",
"will be run before cmd2 flashrom_cmd=command to flash via flashrom futility_cmd=command to flash",
"\"arg2\"], [\"cmd2\", \"arg3\", \"arg4\"]] where cmd1 will be run before cmd2 flashrom_cmd=command to",
"The configurations in this function consistently fail on the verify step, adding --fast",
"with ccd_cr50 on go/grunt-care. # These commands were based off the commands for",
"-*- # Copyright 2019 The Chromium OS Authors. All rights reserved. # Use",
"get_*_commands() functions provide a board-specific set of commands for these tasks. The voltage",
"be set to 1.8 V. Args: servo (servo_lib.Servo): The servo connected to the",
"# found in the LICENSE file. \"\"\"Grunt configs.\"\"\" from __future__ import print_function def",
"tasks. The voltage for this board needs to be set to 1.8 V.",
"on go/grunt-care. # These commands were based off the commands for other boards.",
"% servo.version) flashrom_cmd = ['flashrom', '-p', programmer, '-w'] futility_cmd = ['futility', 'update', '-p',",
"(bool): True if futility is to be used, False if flashrom. servo (servo_lib.Servo):",
"is to be used, False if flashrom. servo (servo_lib.Servo): The type name of",
"raise Exception('%s not supported' % servo.version) flashrom_cmd = ['flashrom', '-p', programmer, '-w'] futility_cmd",
"Each board needs specific commands including the voltage for Vref, to turn on",
"if flashrom. servo (servo_lib.Servo): The type name of the servo device being used.",
"These commands were based off the commands for other boards. programmer = 'raiden_debug_spi:target=AP,serial=%s'",
"elif servo.is_ccd: # Note nothing listed for flashing with ccd_cr50 on go/grunt-care. #",
"specific flash commands for grunt Each board needs specific commands including the voltage",
"# These commands were based off the commands for other boards. programmer =",
"futility_cmd] dut_control*=2d arrays formmated like [[\"cmd1\", \"arg1\", \"arg2\"], [\"cmd2\", \"arg3\", \"arg4\"]] where cmd1",
"All rights reserved. # Use of this source code is governed by a",
"turn on and turn off the SPI flash. The get_*_commands() functions provide a",
"via futility \"\"\" dut_control_on = [] dut_control_off = [] if servo.is_v2: dut_control_on.append([ 'spi2_vref:pp1800',",
"for these tasks. The voltage for this board needs to be set to",
"by a BSD-style license that can be # found in the LICENSE file.",
"servo.is_v4 def get_commands(servo): \"\"\"Get specific flash commands for grunt Each board needs specific",
"The voltage for this board needs to be set to 1.8 V. Args:",
"]) programmer = 'ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off']) programmer",
"license that can be # found in the LICENSE file. \"\"\"Grunt configs.\"\"\" from",
"the verify step, adding --fast removes verification of the flash and allows these",
"allows these configs to flash properly. Meant to be a temporary hack until",
"utf-8 -*- # Copyright 2019 The Chromium OS Authors. All rights reserved. #",
"servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off']) programmer = 'raiden_debug_spi:serial=%s' % servo.serial elif servo.is_ccd: #",
"this board needs to be set to 1.8 V. Args: servo (servo_lib.Servo): The",
"specific commands including the voltage for Vref, to turn on and turn off",
"dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off']) programmer = 'raiden_debug_spi:serial=%s' % servo.serial elif servo.is_ccd: # Note",
"is governed by a BSD-style license that can be # found in the",
"% servo.serial elif servo.is_ccd: # Note nothing listed for flashing with ccd_cr50 on",
"'raiden_debug_spi:target=AP,serial=%s' % servo.serial else: raise Exception('%s not supported' % servo.version) flashrom_cmd = ['flashrom',",
"true if --fast is necessary to flash successfully. The configurations in this function",
"this function consistently fail on the verify step, adding --fast removes verification of",
"True if futility is to be used, False if flashrom. servo (servo_lib.Servo): The",
"governed by a BSD-style license that can be # found in the LICENSE",
"Args: use_futility (bool): True if futility is to be used, False if flashrom.",
"return use_futility and servo.is_v4 def get_commands(servo): \"\"\"Get specific flash commands for grunt Each",
"'cold_reset:on', ]) dut_control_off.append([ 'spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', ]) programmer = 'ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial",
"False otherwise. \"\"\" return use_futility and servo.is_v4 def get_commands(servo): \"\"\"Get specific flash commands",
"on the verify step, adding --fast removes verification of the flash and allows",
"programmer = 'ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off']) programmer =",
"and allows these configs to flash properly. Meant to be a temporary hack",
"ccd_cr50 on go/grunt-care. # These commands were based off the commands for other",
"# Note nothing listed for flashing with ccd_cr50 on go/grunt-care. # These commands",
"reserved. # Use of this source code is governed by a BSD-style license",
"False if flashrom. servo (servo_lib.Servo): The type name of the servo device being",
"\"\"\"Get specific flash commands for grunt Each board needs specific commands including the",
"flashrom_cmd, futility_cmd] dut_control*=2d arrays formmated like [[\"cmd1\", \"arg1\", \"arg2\"], [\"cmd2\", \"arg3\", \"arg4\"]] where",
"verify step, adding --fast removes verification of the flash and allows these configs",
"dut_control_off, flashrom_cmd, futility_cmd] dut_control*=2d arrays formmated like [[\"cmd1\", \"arg1\", \"arg2\"], [\"cmd2\", \"arg3\", \"arg4\"]]",
"V. Args: servo (servo_lib.Servo): The servo connected to the target DUT. Returns: list:",
"be a temporary hack until b/143240576 is fixed. Args: use_futility (bool): True if",
"needs to be set to 1.8 V. Args: servo (servo_lib.Servo): The servo connected",
"cmd1 will be run before cmd2 flashrom_cmd=command to flash via flashrom futility_cmd=command to",
"# Use of this source code is governed by a BSD-style license that",
"via flashrom futility_cmd=command to flash via futility \"\"\" dut_control_on = [] dut_control_off =",
"servo.is_v2: dut_control_on.append([ 'spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', ]) dut_control_off.append([ 'spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', ])",
"for other boards. programmer = 'raiden_debug_spi:target=AP,serial=%s' % servo.serial else: raise Exception('%s not supported'",
"servo.version) flashrom_cmd = ['flashrom', '-p', programmer, '-w'] futility_cmd = ['futility', 'update', '-p', programmer,",
"if futility is to be used, False if flashrom. servo (servo_lib.Servo): The type",
"Vref, to turn on and turn off the SPI flash. The get_*_commands() functions",
"use_futility and servo.is_v4 def get_commands(servo): \"\"\"Get specific flash commands for grunt Each board",
"and turn off the SPI flash. The get_*_commands() functions provide a board-specific set",
"to flash properly. Meant to be a temporary hack until b/143240576 is fixed.",
"programmer = 'raiden_debug_spi:target=AP,serial=%s' % servo.serial else: raise Exception('%s not supported' % servo.version) flashrom_cmd",
"device being used. Returns: bool: True if fast is necessary, False otherwise. \"\"\"",
"to be set to 1.8 V. Args: servo (servo_lib.Servo): The servo connected to",
"servo connected to the target DUT. Returns: list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd] dut_control*=2d",
"and servo.is_v4 def get_commands(servo): \"\"\"Get specific flash commands for grunt Each board needs",
"Chromium OS Authors. All rights reserved. # Use of this source code is",
"programmer, '-w'] futility_cmd = ['futility', 'update', '-p', programmer, '-i'] return [dut_control_on, dut_control_off, flashrom_cmd,",
"'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', ]) dut_control_off.append([ 'spi2_vref:off', 'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', ]) programmer = 'ft2232_spi:type=google-servo-v2,serial=%s'",
"the LICENSE file. \"\"\"Grunt configs.\"\"\" from __future__ import print_function def is_fast_required(use_futility, servo): \"\"\"Returns",
"including the voltage for Vref, to turn on and turn off the SPI",
"servo (servo_lib.Servo): The type name of the servo device being used. Returns: bool:",
"of commands for these tasks. The voltage for this board needs to be",
"flash via flashrom futility_cmd=command to flash via futility \"\"\" dut_control_on = [] dut_control_off",
"servo.serial elif servo.is_ccd: # Note nothing listed for flashing with ccd_cr50 on go/grunt-care.",
"flashrom. servo (servo_lib.Servo): The type name of the servo device being used. Returns:",
"programmer = 'raiden_debug_spi:serial=%s' % servo.serial elif servo.is_ccd: # Note nothing listed for flashing",
"'spi2_buf_en:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off']) programmer = 'raiden_debug_spi:serial=%s' % servo.serial elif servo.is_ccd: # Note nothing",
"[[\"cmd1\", \"arg1\", \"arg2\"], [\"cmd2\", \"arg3\", \"arg4\"]] where cmd1 will be run before cmd2",
"= 'raiden_debug_spi:serial=%s' % servo.serial elif servo.is_ccd: # Note nothing listed for flashing with",
"boards. programmer = 'raiden_debug_spi:target=AP,serial=%s' % servo.serial else: raise Exception('%s not supported' % servo.version)",
"flash and allows these configs to flash properly. Meant to be a temporary",
"for flashing with ccd_cr50 on go/grunt-care. # These commands were based off the",
"2019 The Chromium OS Authors. All rights reserved. # Use of this source",
"flashing with ccd_cr50 on go/grunt-care. # These commands were based off the commands",
"% servo.serial else: raise Exception('%s not supported' % servo.version) flashrom_cmd = ['flashrom', '-p',",
"DUT. Returns: list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd] dut_control*=2d arrays formmated like [[\"cmd1\", \"arg1\",",
"servo.serial elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on']) dut_control_off.append(['spi2_vref:off', 'spi2_buf_en:off']) programmer = 'raiden_debug_spi:serial=%s' % servo.serial elif",
"servo): \"\"\"Returns true if --fast is necessary to flash successfully. The configurations in",
"to the target DUT. Returns: list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd] dut_control*=2d arrays formmated",
"successfully. The configurations in this function consistently fail on the verify step, adding",
"if fast is necessary, False otherwise. \"\"\" return use_futility and servo.is_v4 def get_commands(servo):",
"the servo device being used. Returns: bool: True if fast is necessary, False",
"other boards. programmer = 'raiden_debug_spi:target=AP,serial=%s' % servo.serial else: raise Exception('%s not supported' %",
"The servo connected to the target DUT. Returns: list: [dut_control_on, dut_control_off, flashrom_cmd, futility_cmd]",
"were based off the commands for other boards. programmer = 'raiden_debug_spi:target=AP,serial=%s' % servo.serial",
"used. Returns: bool: True if fast is necessary, False otherwise. \"\"\" return use_futility",
"# -*- coding: utf-8 -*- # Copyright 2019 The Chromium OS Authors. All",
"Copyright 2019 The Chromium OS Authors. All rights reserved. # Use of this",
"configs to flash properly. Meant to be a temporary hack until b/143240576 is",
"on and turn off the SPI flash. The get_*_commands() functions provide a board-specific",
"not supported' % servo.version) flashrom_cmd = ['flashrom', '-p', programmer, '-w'] futility_cmd = ['futility',",
"'spi2_buf_en:off', 'spi2_buf_on_flex_en:off', 'cold_reset:off', ]) programmer = 'ft2232_spi:type=google-servo-v2,serial=%s' % servo.serial elif servo.is_micro: dut_control_on.append(['spi2_vref:pp1800', 'spi2_buf_en:on'])",
"fail on the verify step, adding --fast removes verification of the flash and",
"futility is to be used, False if flashrom. servo (servo_lib.Servo): The type name",
"voltage for Vref, to turn on and turn off the SPI flash. The",
"# Copyright 2019 The Chromium OS Authors. All rights reserved. # Use of",
"consistently fail on the verify step, adding --fast removes verification of the flash",
"[dut_control_on, dut_control_off, flashrom_cmd, futility_cmd] dut_control*=2d arrays formmated like [[\"cmd1\", \"arg1\", \"arg2\"], [\"cmd2\", \"arg3\",",
"in this function consistently fail on the verify step, adding --fast removes verification",
"flashrom futility_cmd=command to flash via futility \"\"\" dut_control_on = [] dut_control_off = []",
"be # found in the LICENSE file. \"\"\"Grunt configs.\"\"\" from __future__ import print_function",
"\"\"\" dut_control_on = [] dut_control_off = [] if servo.is_v2: dut_control_on.append([ 'spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on',",
"print_function def is_fast_required(use_futility, servo): \"\"\"Returns true if --fast is necessary to flash successfully.",
"b/143240576 is fixed. Args: use_futility (bool): True if futility is to be used,",
"servo (servo_lib.Servo): The servo connected to the target DUT. Returns: list: [dut_control_on, dut_control_off,",
"OS Authors. All rights reserved. # Use of this source code is governed",
"[] dut_control_off = [] if servo.is_v2: dut_control_on.append([ 'spi2_vref:pp1800', 'spi2_buf_en:on', 'spi2_buf_on_flex_en:on', 'cold_reset:on', ]) dut_control_off.append([",
"(servo_lib.Servo): The type name of the servo device being used. Returns: bool: True",
"in the LICENSE file. \"\"\"Grunt configs.\"\"\" from __future__ import print_function def is_fast_required(use_futility, servo):",
"servo device being used. Returns: bool: True if fast is necessary, False otherwise."
] |
[
"('django.db.models.fields.CharField', [], {'max_length': '100'}), 'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Unit']\"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now':",
"u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [],",
"{'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to':",
"'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank':",
"from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm):",
")) db.send_create_signal(u'organisation', ['Board']) # Adding model 'BoardMembership' db.create_table(u'organisation_boardmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True,",
"self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), )) db.send_create_signal(u'organisation', ['BoardMembership']) def backwards(self,",
"Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Person' db.create_table(u'organisation_person', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at',",
"self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), )) db.send_create_signal(u'organisation', ['BoardMembership']) def backwards(self, orm): # Deleting model 'Person' db.delete_table(u'organisation_person') #",
"backwards(self, orm): # Deleting model 'Person' db.delete_table(u'organisation_person') # Deleting model 'Unit' db.delete_table(u'organisation_unit') #",
"'200', 'blank': 'True'}) }, u'organisation.unit': { 'Meta': {'object_name': 'Unit'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add':",
"from south.db import db from south.v2 import SchemaMigration from django.db import models class",
"# Deleting model 'Person' db.delete_table(u'organisation_person') # Deleting model 'Unit' db.delete_table(u'organisation_unit') # Deleting model",
"('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), )) db.send_create_signal(u'organisation', ['Person']) # Adding model 'Unit'",
"[], {'max_length': '100'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [],",
"# Adding model 'UnitMembership' db.create_table(u'organisation_unitmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True,",
"'100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.person': { 'Meta': {'object_name':",
"Deleting model 'Person' db.delete_table(u'organisation_person') # Deleting model 'Unit' db.delete_table(u'organisation_unit') # Deleting model 'UnitMembership'",
"('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),",
"('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.person':",
"Adding model 'BoardMembership' db.create_table(u'organisation_boardmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),",
"self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'organisation',",
"}, u'organisation.boardmembership': { 'Meta': {'object_name': 'BoardMembership'}, 'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Board']\"}), 'created_at': ('django.db.models.fields.DateTimeField',",
"{'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.boardmembership': { 'Meta':",
"u\"orm['organisation.Unit']\"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) } } complete_apps = ['organisation']",
"{'max_length': '100', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField',",
"('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.boardmembership': { 'Meta': {'object_name': 'BoardMembership'}, 'board':",
"('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField',",
"'Meta': {'object_name': 'Person'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [],",
"'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank':",
"{'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Unit']\"}), 'updated_at':",
"model 'UnitMembership' db.delete_table(u'organisation_unitmembership') # Deleting model 'Board' db.delete_table(u'organisation_board') # Deleting model 'BoardMembership' db.delete_table(u'organisation_boardmembership')",
"(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')()), ))",
"('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), )) db.send_create_signal(u'organisation', ['UnitMembership']) # Adding model 'Board'",
"('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), ))",
"blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'organisation', ['Unit']) # Adding model 'UnitMembership' db.create_table(u'organisation_unitmembership', ( (u'id',",
"Adding model 'UnitMembership' db.create_table(u'organisation_unitmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),",
")) db.send_create_signal(u'organisation', ['BoardMembership']) def backwards(self, orm): # Deleting model 'Person' db.delete_table(u'organisation_person') # Deleting",
"[], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True',",
"self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), )) db.send_create_signal(u'organisation', ['Person']) # Adding model 'Unit' db.create_table(u'organisation_unit',",
"'Meta': {'object_name': 'Unit'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [],",
"'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.boardmembership': { 'Meta': {'object_name': 'BoardMembership'},",
"}, u'organisation.unitmembership': { 'Meta': {'object_name': 'UnitMembership'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),",
"self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'organisation', ['Board']) #",
"('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), )) db.send_create_signal(u'organisation', ['BoardMembership']) def backwards(self, orm): # Deleting model 'Person' db.delete_table(u'organisation_person')",
"self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'organisation', ['Board']) # Adding model 'BoardMembership' db.create_table(u'organisation_boardmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at',",
"'Person'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),",
"self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('twitter',",
"('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, u'organisation.unit': { 'Meta': {'object_name': 'Unit'}, 'created_at':",
"= { u'organisation.board': { 'Meta': {'object_name': 'Board'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank':",
"model 'Unit' db.create_table(u'organisation_unit', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name',",
"db.create_table(u'organisation_board', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description',",
"self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), )) db.send_create_signal(u'organisation', ['UnitMembership']) # Adding model 'Board' db.create_table(u'organisation_board', ( (u'id',",
"'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank':",
"'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) },",
"{'object_name': 'BoardMembership'}, 'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Board']\"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank':",
"'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length':",
"from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Person'",
"u\"orm['organisation.Board']\"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),",
"[], {'to': u\"orm['organisation.Unit']\"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) } } complete_apps",
"'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField',",
"(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'organisation', ['Unit'])",
"Adding model 'Person' db.create_table(u'organisation_person', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),",
"'True'}) }, u'organisation.unitmembership': { 'Meta': {'object_name': 'UnitMembership'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank':",
"[], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100',",
"{ 'Meta': {'object_name': 'Board'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField',",
"self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'organisation', ['Unit']) # Adding model 'UnitMembership' db.create_table(u'organisation_unitmembership', (",
"db.send_create_signal(u'organisation', ['Person']) # Adding model 'Unit' db.create_table(u'organisation_unit', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),",
"[], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.unitmembership': { 'Meta': {'object_name': 'UnitMembership'}, 'created_at': ('django.db.models.fields.DateTimeField',",
"(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo',",
"'blank': 'True'}) }, u'organisation.person': { 'Meta': {'object_name': 'Person'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True',",
"[], {'max_length': '100', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}), 'updated_at':",
"self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])),",
"'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [],",
"blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), )) db.send_create_signal(u'organisation', ['BoardMembership']) def backwards(self, orm):",
"{'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key':",
"'Board'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), u'id':",
"db.create_table(u'organisation_boardmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person',",
"('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), )) db.send_create_signal(u'organisation', ['UnitMembership']) #",
"[], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),",
"'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title':",
"{'auto_now': 'True', 'blank': 'True'}) }, u'organisation.unitmembership': { 'Meta': {'object_name': 'UnitMembership'}, 'created_at': ('django.db.models.fields.DateTimeField', [],",
"('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.person': { 'Meta': {'object_name': 'Person'}, 'created_at':",
"'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id':",
"['Board']) # Adding model 'BoardMembership' db.create_table(u'organisation_boardmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at',",
"[], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [],",
"u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Unit']\"}), 'updated_at': ('django.db.models.fields.DateTimeField',",
"self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100,",
"('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), )) db.send_create_signal(u'organisation', ['UnitMembership']) # Adding model 'Board' db.create_table(u'organisation_board', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),",
"self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), )) db.send_create_signal(u'organisation', ['UnitMembership']) # Adding",
"south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration):",
"model 'Unit' db.delete_table(u'organisation_unit') # Deleting model 'UnitMembership' db.delete_table(u'organisation_unitmembership') # Deleting model 'Board' db.delete_table(u'organisation_board')",
"south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): #",
"orm): # Deleting model 'Person' db.delete_table(u'organisation_person') # Deleting model 'Unit' db.delete_table(u'organisation_unit') # Deleting",
"'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) },",
"model 'Person' db.delete_table(u'organisation_person') # Deleting model 'Unit' db.delete_table(u'organisation_unit') # Deleting model 'UnitMembership' db.delete_table(u'organisation_unitmembership')",
"blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), )) db.send_create_signal(u'organisation', ['Person']) # Adding model 'Unit' db.create_table(u'organisation_unit', (",
"utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from",
"u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo': ('django.db.models.fields.files.ImageField', [],",
"blank=True)), )) db.send_create_signal(u'organisation', ['Person']) # Adding model 'Unit' db.create_table(u'organisation_unit', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at',",
"coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db",
"self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), )) db.send_create_signal(u'organisation',",
"{'to': u\"orm['organisation.Unit']\"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) } } complete_apps =",
"('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'organisation', ['Unit']) # Adding",
"('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now':",
"{}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField',",
"'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True',",
"self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), )) db.send_create_signal(u'organisation', ['Person'])",
"{'max_length': '200', 'blank': 'True'}) }, u'organisation.unit': { 'Meta': {'object_name': 'Unit'}, 'created_at': ('django.db.models.fields.DateTimeField', [],",
"{ u'organisation.board': { 'Meta': {'object_name': 'Board'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),",
"('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), )) db.send_create_signal(u'organisation', ['Person']) #",
"{'object_name': 'UnitMembership'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key':",
"u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [],",
"(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit',",
"('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.unitmembership': { 'Meta': {'object_name': 'UnitMembership'}, 'created_at':",
")) db.send_create_signal(u'organisation', ['Person']) # Adding model 'Unit' db.create_table(u'organisation_unit', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True,",
"import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding",
"Adding model 'Board' db.create_table(u'organisation_board', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),",
"'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name':",
"from south.utils import datetime_utils as datetime from south.db import db from south.v2 import",
"'True'}) }, u'organisation.unit': { 'Meta': {'object_name': 'Unit'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank':",
"Deleting model 'Unit' db.delete_table(u'organisation_unit') # Deleting model 'UnitMembership' db.delete_table(u'organisation_unitmembership') # Deleting model 'Board'",
"[], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),",
"'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),",
"self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), )) db.send_create_signal(u'organisation', ['Person']) # Adding",
"(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board',",
"self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'organisation', ['Board']) # Adding model 'BoardMembership'",
"db.create_table(u'organisation_person', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('email',",
"model 'Board' db.create_table(u'organisation_board', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name',",
"('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), )) db.send_create_signal(u'organisation', ['BoardMembership']) def backwards(self, orm): # Deleting model",
"('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), )) db.send_create_signal(u'organisation', ['BoardMembership']) def backwards(self, orm): #",
"[], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key':",
"( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])),",
"{'to': u\"orm['organisation.Board']\"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key':",
"'Person' db.delete_table(u'organisation_person') # Deleting model 'Unit' db.delete_table(u'organisation_unit') # Deleting model 'UnitMembership' db.delete_table(u'organisation_unitmembership') #",
"'True'}), 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [],",
"('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'max_length':",
"[], {'max_length': '200', 'blank': 'True'}) }, u'organisation.unit': { 'Meta': {'object_name': 'Unit'}, 'created_at': ('django.db.models.fields.DateTimeField',",
"blank=True)), ('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), )) db.send_create_signal(u'organisation', ['Person']) # Adding model",
"# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db",
"blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'organisation', ['Board']) # Adding",
"('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'organisation', ['Board'])",
"-*- from south.utils import datetime_utils as datetime from south.db import db from south.v2",
"# Adding model 'Person' db.create_table(u'organisation_person', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True,",
"db.send_create_signal(u'organisation', ['Unit']) # Adding model 'UnitMembership' db.create_table(u'organisation_unitmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),",
"('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), )) db.send_create_signal(u'organisation', ['BoardMembership']) def",
"[], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.boardmembership': {",
"'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, u'organisation.unit': { 'Meta':",
"model 'Person' db.create_table(u'organisation_person', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name',",
"('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),",
"{ 'Meta': {'object_name': 'BoardMembership'}, 'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Board']\"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add':",
"'UnitMembership' db.create_table(u'organisation_unitmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),",
"# Adding model 'Unit' db.create_table(u'organisation_unit', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True,",
"import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from",
"as datetime from south.db import db from south.v2 import SchemaMigration from django.db import",
"{'auto_now': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, u'organisation.unit':",
"[], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at':",
"[], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True',",
"model 'UnitMembership' db.create_table(u'organisation_unitmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title',",
"{ 'Meta': {'object_name': 'Person'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField',",
"( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')()),",
"'100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.boardmembership': { 'Meta': {'object_name':",
"blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'organisation', ['Unit']) # Adding model 'UnitMembership'",
"'Board' db.create_table(u'organisation_board', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),",
"'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Unit']\"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }",
"db.delete_table(u'organisation_person') # Deleting model 'Unit' db.delete_table(u'organisation_unit') # Deleting model 'UnitMembership' db.delete_table(u'organisation_unitmembership') # Deleting",
"('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Unit']\"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) } }",
"'BoardMembership' db.create_table(u'organisation_boardmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),",
"{'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.unitmembership': { 'Meta':",
"'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})",
"('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length':",
"{'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at':",
"def backwards(self, orm): # Deleting model 'Person' db.delete_table(u'organisation_person') # Deleting model 'Unit' db.delete_table(u'organisation_unit')",
"'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200',",
"Adding model 'Unit' db.create_table(u'organisation_unit', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),",
"'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length':",
"'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name':",
"u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})",
"('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)),",
"self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])),",
"<reponame>Mindelirium/foundation # -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from",
"[], {'max_length': '18', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'url':",
"[], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.person': {",
"'blank': 'True'}) }, u'organisation.unit': { 'Meta': {'object_name': 'Unit'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True',",
"'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Board']\"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id':",
"'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now':",
"model 'Board' db.delete_table(u'organisation_board') # Deleting model 'BoardMembership' db.delete_table(u'organisation_boardmembership') models = { u'organisation.board': {",
"[], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Unit']\"}),",
"('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Board']\"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField',",
"db.send_create_signal(u'organisation', ['BoardMembership']) def backwards(self, orm): # Deleting model 'Person' db.delete_table(u'organisation_person') # Deleting model",
"model 'BoardMembership' db.delete_table(u'organisation_boardmembership') models = { u'organisation.board': { 'Meta': {'object_name': 'Board'}, 'created_at': ('django.db.models.fields.DateTimeField',",
"'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}),",
"{'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length':",
"db.send_create_signal(u'organisation', ['Board']) # Adding model 'BoardMembership' db.create_table(u'organisation_boardmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),",
"'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'unit': ('django.db.models.fields.related.ForeignKey', [],",
"('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.boardmembership':",
"self.gf('django.db.models.fields.CharField')(max_length=100)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), ))",
"models = { u'organisation.board': { 'Meta': {'object_name': 'Board'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True',",
"[], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.person': { 'Meta': {'object_name': 'Person'}, 'created_at': ('django.db.models.fields.DateTimeField',",
"u'organisation.person': { 'Meta': {'object_name': 'Person'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email':",
"blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), )) db.send_create_signal(u'organisation', ['UnitMembership']) # Adding model",
"'BoardMembership'}, 'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Board']\"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),",
"{'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),",
"[], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [],",
"'100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.unitmembership': { 'Meta': {'object_name':",
"{'max_length': '100'}), 'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Unit']\"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank':",
"('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})",
"'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name':",
"datetime from south.db import db from south.v2 import SchemaMigration from django.db import models",
"( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'organisation',",
"{ 'Meta': {'object_name': 'Unit'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField',",
"# Adding model 'BoardMembership' db.create_table(u'organisation_boardmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True,",
"blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'organisation', ['Board']) # Adding model 'BoardMembership' db.create_table(u'organisation_boardmembership',",
"'blank': 'True'}) }, u'organisation.boardmembership': { 'Meta': {'object_name': 'BoardMembership'}, 'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Board']\"}),",
"'100'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18',",
"u'organisation.unit': { 'Meta': {'object_name': 'Unit'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id':",
"{'max_length': '100'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length':",
"'UnitMembership'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),",
"{'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank':",
"db.delete_table(u'organisation_board') # Deleting model 'BoardMembership' db.delete_table(u'organisation_boardmembership') models = { u'organisation.board': { 'Meta': {'object_name':",
"'BoardMembership' db.delete_table(u'organisation_boardmembership') models = { u'organisation.board': { 'Meta': {'object_name': 'Board'}, 'created_at': ('django.db.models.fields.DateTimeField', [],",
"self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), )) db.send_create_signal(u'organisation', ['BoardMembership']) def backwards(self, orm): # Deleting",
"[], {'max_length': '100'}), 'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Unit']\"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True',",
"[], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [],",
"self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), )) db.send_create_signal(u'organisation',",
"('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length':",
"'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [],",
"('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField',",
"}, u'organisation.unit': { 'Meta': {'object_name': 'Unit'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),",
"u'organisation.unitmembership': { 'Meta': {'object_name': 'UnitMembership'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id':",
"Deleting model 'UnitMembership' db.delete_table(u'organisation_unitmembership') # Deleting model 'Board' db.delete_table(u'organisation_board') # Deleting model 'BoardMembership'",
"('name', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'organisation', ['Unit']) # Adding model 'UnitMembership' db.create_table(u'organisation_unitmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),",
"'True', 'blank': 'True'}) }, u'organisation.unitmembership': { 'Meta': {'object_name': 'UnitMembership'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add':",
"'100'}), 'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Unit']\"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})",
"'True', 'blank': 'True'}) }, u'organisation.boardmembership': { 'Meta': {'object_name': 'BoardMembership'}, 'board': ('django.db.models.fields.related.ForeignKey', [], {'to':",
"'100', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [],",
"# Deleting model 'Board' db.delete_table(u'organisation_board') # Deleting model 'BoardMembership' db.delete_table(u'organisation_boardmembership') models = {",
"import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Person' db.create_table(u'organisation_person', (",
"self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), )) db.send_create_signal(u'organisation', ['UnitMembership']) # Adding model 'Board' db.create_table(u'organisation_board',",
"def forwards(self, orm): # Adding model 'Person' db.create_table(u'organisation_person', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True,",
"('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.unitmembership':",
"SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model",
"# Deleting model 'BoardMembership' db.delete_table(u'organisation_boardmembership') models = { u'organisation.board': { 'Meta': {'object_name': 'Board'},",
"# Adding model 'Board' db.create_table(u'organisation_board', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True,",
"'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.person': { 'Meta': {'object_name': 'Person'},",
"('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), )) db.send_create_signal(u'organisation', ['Person']) # Adding model 'Unit' db.create_table(u'organisation_unit', ( (u'id',",
"'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField',",
"db.send_create_signal(u'organisation', ['UnitMembership']) # Adding model 'Board' db.create_table(u'organisation_board', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),",
"blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), )) db.send_create_signal(u'organisation', ['UnitMembership'])",
"'True'}) }, u'organisation.person': { 'Meta': {'object_name': 'Person'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank':",
"( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),",
"datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db",
"self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'organisation', ['Unit']) #",
"Deleting model 'BoardMembership' db.delete_table(u'organisation_boardmembership') models = { u'organisation.board': { 'Meta': {'object_name': 'Board'}, 'created_at':",
"'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'unit': ('django.db.models.fields.related.ForeignKey',",
"'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.unitmembership': { 'Meta': {'object_name': 'UnitMembership'},",
"[], {'to': u\"orm['organisation.Board']\"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [],",
"'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at':",
"-*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import",
"'Meta': {'object_name': 'UnitMembership'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [],",
"[], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.boardmembership': { 'Meta': {'object_name': 'BoardMembership'}, 'board': ('django.db.models.fields.related.ForeignKey',",
"'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Unit']\"}), 'updated_at': ('django.db.models.fields.DateTimeField', [],",
"blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('twitter', self.gf('django.db.models.fields.CharField')(max_length=18,",
"['Person']) # Adding model 'Unit' db.create_table(u'organisation_unit', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at',",
"self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), )) db.send_create_signal(u'organisation', ['Person']) # Adding model 'Unit' db.create_table(u'organisation_unit', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),",
"('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), )) db.send_create_signal(u'organisation', ['UnitMembership']) # Adding model 'Board' db.create_table(u'organisation_board', (",
"orm): # Adding model 'Person' db.create_table(u'organisation_person', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at',",
"'UnitMembership' db.delete_table(u'organisation_unitmembership') # Deleting model 'Board' db.delete_table(u'organisation_board') # Deleting model 'BoardMembership' db.delete_table(u'organisation_boardmembership') models",
"'Unit'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),",
"{'auto_now': 'True', 'blank': 'True'}) }, u'organisation.boardmembership': { 'Meta': {'object_name': 'BoardMembership'}, 'board': ('django.db.models.fields.related.ForeignKey', [],",
"db.delete_table(u'organisation_unit') # Deleting model 'UnitMembership' db.delete_table(u'organisation_unitmembership') # Deleting model 'Board' db.delete_table(u'organisation_board') # Deleting",
"self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'organisation', ['Unit']) # Adding model 'UnitMembership' db.create_table(u'organisation_unitmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at',",
"south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration",
"'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, u'organisation.unit': {",
"Deleting model 'Board' db.delete_table(u'organisation_board') # Deleting model 'BoardMembership' db.delete_table(u'organisation_boardmembership') models = { u'organisation.board':",
"'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField',",
"'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),",
"import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def",
"('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey',",
"'Unit' db.create_table(u'organisation_unit', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),",
"{'auto_now': 'True', 'blank': 'True'}) }, u'organisation.person': { 'Meta': {'object_name': 'Person'}, 'created_at': ('django.db.models.fields.DateTimeField', [],",
"'Board' db.delete_table(u'organisation_board') # Deleting model 'BoardMembership' db.delete_table(u'organisation_boardmembership') models = { u'organisation.board': { 'Meta':",
")) db.send_create_signal(u'organisation', ['Unit']) # Adding model 'UnitMembership' db.create_table(u'organisation_unitmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True,",
"('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'organisation', ['Board']) # Adding model 'BoardMembership' db.create_table(u'organisation_boardmembership', (",
"self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)), ('url',",
")) db.send_create_signal(u'organisation', ['UnitMembership']) # Adding model 'Board' db.create_table(u'organisation_board', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True,",
"[], {'auto_now': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) },",
"self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'organisation', ['Board']) # Adding model 'BoardMembership' db.create_table(u'organisation_boardmembership', ( (u'id',",
"['Unit']) # Adding model 'UnitMembership' db.create_table(u'organisation_unitmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at',",
"blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), )) db.send_create_signal(u'organisation', ['BoardMembership'])",
"'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField',",
"{'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank':",
"'Person' db.create_table(u'organisation_person', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),",
"'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, u'organisation.unit': { 'Meta': {'object_name':",
"{'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo':",
"'75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo': ('django.db.models.fields.files.ImageField',",
"u'organisation.board': { 'Meta': {'object_name': 'Board'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description':",
"'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'twitter':",
"db.create_table(u'organisation_unit', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ))",
"{'max_length': '18', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField',",
"u'organisation.boardmembership': { 'Meta': {'object_name': 'BoardMembership'}, 'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Board']\"}), 'created_at': ('django.db.models.fields.DateTimeField', [],",
"# Deleting model 'Unit' db.delete_table(u'organisation_unit') # Deleting model 'UnitMembership' db.delete_table(u'organisation_unitmembership') # Deleting model",
"['BoardMembership']) def backwards(self, orm): # Deleting model 'Person' db.delete_table(u'organisation_person') # Deleting model 'Unit'",
"('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('unit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), ))",
"'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),",
"django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Person' db.create_table(u'organisation_person',",
"{'object_name': 'Person'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length':",
"self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'organisation', ['Unit']) # Adding model",
"('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), )) db.send_create_signal(u'organisation',",
"blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('twitter', self.gf('django.db.models.fields.CharField')(max_length=18, blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200,",
"{'primary_key': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'unit':",
"{'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.person': { 'Meta':",
"class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Person' db.create_table(u'organisation_person', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),",
"('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'unit': ('django.db.models.fields.related.ForeignKey', [], {'to':",
"'18', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [],",
"('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),",
"'Meta': {'object_name': 'BoardMembership'}, 'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Board']\"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True',",
"'Unit' db.delete_table(u'organisation_unit') # Deleting model 'UnitMembership' db.delete_table(u'organisation_unitmembership') # Deleting model 'Board' db.delete_table(u'organisation_board') #",
"}, u'organisation.person': { 'Meta': {'object_name': 'Person'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),",
"self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Unit'])), )) db.send_create_signal(u'organisation', ['UnitMembership']) # Adding model 'Board' db.create_table(u'organisation_board', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at',",
"'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person':",
"db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self,",
"('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now':",
"('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('description', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'organisation', ['Board']) # Adding model",
"('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=100)), )) db.send_create_signal(u'organisation', ['Unit']) # Adding model 'UnitMembership' db.create_table(u'organisation_unitmembership',",
"db.create_table(u'organisation_unitmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('person',",
"['UnitMembership']) # Adding model 'Board' db.create_table(u'organisation_board', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at',",
"('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),",
"db.delete_table(u'organisation_boardmembership') models = { u'organisation.board': { 'Meta': {'object_name': 'Board'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add':",
"'Meta': {'object_name': 'Board'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [],",
"forwards(self, orm): # Adding model 'Person' db.create_table(u'organisation_person', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),",
"{'to': u\"orm['organisation.Person']\"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank':",
"('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [],",
"('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField',",
"'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField',",
"('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),",
"'blank': 'True'}) }, u'organisation.unitmembership': { 'Meta': {'object_name': 'UnitMembership'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True',",
"{'object_name': 'Board'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}),",
"('description', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal(u'organisation', ['Board']) # Adding model 'BoardMembership' db.create_table(u'organisation_boardmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),",
"{'object_name': 'Unit'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key':",
"# Deleting model 'UnitMembership' db.delete_table(u'organisation_unitmembership') # Deleting model 'Board' db.delete_table(u'organisation_board') # Deleting model",
"db.delete_table(u'organisation_unitmembership') # Deleting model 'Board' db.delete_table(u'organisation_board') # Deleting model 'BoardMembership' db.delete_table(u'organisation_boardmembership') models =",
"models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Person' db.create_table(u'organisation_person', ( (u'id',",
"[], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'organisation.unitmembership': {",
"'True', 'blank': 'True'}) }, u'organisation.person': { 'Meta': {'object_name': 'Person'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add':",
"'True'}) }, u'organisation.boardmembership': { 'Meta': {'object_name': 'BoardMembership'}, 'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['organisation.Board']\"}), 'created_at':",
"{ 'Meta': {'object_name': 'UnitMembership'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField',",
"self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Person'])), ('board', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Board'])), )) db.send_create_signal(u'organisation', ['BoardMembership']) def backwards(self, orm): # Deleting model 'Person'",
"'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, u'organisation.unit': { 'Meta': {'object_name': 'Unit'},",
"model 'BoardMembership' db.create_table(u'organisation_boardmembership', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('title',",
"'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField',"
] |
[
"'https://www.di.se/rss', 'https://www.arbetarbladet.se/feed' ], 'uk': [ 'https://ukurier.gov.ua/uk/feed/', 'http://day.kyiv.ua/uk/news-rss.xml' ], 'vi': [ 'https://vnexpress.net/rss/tin-moi-nhat.rss', 'https://www.tienphong.vn/rss/ho-chi-minh-288.rss' ]",
"import BeautifulSoup as bs languages_names = [x['name'] for x in languages] rss_sources =",
"os.path import join as join_path, isdir from shutil import rmtree from os import",
"already exists, should delete it and re-fetch the data? Y/N\\n\") if user_input.lower() !=",
"if len(validation_text) > 200: validation_text = validation_text[:200] f.write(validation_text.encode(\"UTF-8\")) f.write('\\n'.encode(\"UTF-8\")) # print('\\t', title, '",
"'https://www.iltalehti.fi/rss/uutiset.xml', 'https://www.uusisuomi.fi/raha/feed' ], 'fr': [ 'https://www.lemonde.fr/rss/une.xml', 'http://www.lefigaro.fr/rss/figaro_flash-actu.xml' ], 'hu': [ 'https://nepszava.hu/feed', 'https://www.vg.hu/feed/' ],",
"'http://day.kyiv.ua/uk/news-rss.xml' ], 'vi': [ 'https://vnexpress.net/rss/tin-moi-nhat.rss', 'https://www.tienphong.vn/rss/ho-chi-minh-288.rss' ] } def text_from_html(html): return bs(html, \"lxml\").text",
"for lang in ['vi']: for lang in languages_names: print(lang) if lang not in",
"'https://www.ad.nl/nieuws/rss.xml' ], 'no': [ 'https://www.vg.no/rss/feed/forsiden/?format=rss', 'https://www.aftenposten.no/rss' ], 'pl': [ 'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml', 'https://www.rp.pl/rss/1019' ], 'pt':",
"'fi': [ 'https://www.iltalehti.fi/rss/uutiset.xml', 'https://www.uusisuomi.fi/raha/feed' ], 'fr': [ 'https://www.lemonde.fr/rss/une.xml', 'http://www.lefigaro.fr/rss/figaro_flash-actu.xml' ], 'hu': [ 'https://nepszava.hu/feed',",
"# print('\\t', title, ' -> ', summary, ' -> ', validation_text) print(\"\\tfound\", len(items),",
"validation_text[:200] f.write(validation_text.encode(\"UTF-8\")) f.write('\\n'.encode(\"UTF-8\")) # print('\\t', title, ' -> ', summary, ' -> ',",
"'https://www.lemonde.fr/rss/une.xml', 'http://www.lefigaro.fr/rss/figaro_flash-actu.xml' ], 'hu': [ 'https://nepszava.hu/feed', 'https://www.vg.hu/feed/' ], 'it': [ 'https://www.fanpage.it/feed/', 'http://www.ansa.it/campania/notizie/campania_rss.xml' ],",
"= { 'da': [ 'https://politiken.dk/rss/senestenyt.rss', 'https://borsen.dk/rss/' ], 'de': [ 'http://www.spiegel.de/index.rss', 'https://www.faz.net/rss/aktuell/' ], 'en':",
"not in rss_sources: print(\"\\tSkipping\", lang, \"as there are no sources.\") continue with open(join_path(VALIDATION_SET_DIR,",
"], 'vi': [ 'https://vnexpress.net/rss/tin-moi-nhat.rss', 'https://www.tienphong.vn/rss/ho-chi-minh-288.rss' ] } def text_from_html(html): return bs(html, \"lxml\").text if",
"], 'nl': [ 'https://www.telegraaf.nl/rss', 'https://www.ad.nl/nieuws/rss.xml' ], 'no': [ 'https://www.vg.no/rss/feed/forsiden/?format=rss', 'https://www.aftenposten.no/rss' ], 'pl': [",
"# for lang in ['vi']: for lang in languages_names: print(lang) if lang not",
"data? Y/N\\n\") if user_input.lower() != 'y': print(\"Nothing to do.\") exit(0) else: print(\"Deleting old",
"[ 'https://vnexpress.net/rss/tin-moi-nhat.rss', 'https://www.tienphong.vn/rss/ho-chi-minh-288.rss' ] } def text_from_html(html): return bs(html, \"lxml\").text if __name__ ==",
"x in languages] rss_sources = { 'da': [ 'https://politiken.dk/rss/senestenyt.rss', 'https://borsen.dk/rss/' ], 'de': [",
"validate set dir\", VALIDATION_SET_DIR) rmtree(VALIDATION_SET_DIR) print(\"Creating new directory\", VALIDATION_SET_DIR) mkdir(VALIDATION_SET_DIR) # for lang",
"dir\", VALIDATION_SET_DIR) rmtree(VALIDATION_SET_DIR) print(\"Creating new directory\", VALIDATION_SET_DIR) mkdir(VALIDATION_SET_DIR) # for lang in ['vi']:",
"+ sanitize_text(summary) if len(validation_text) > 200: validation_text = validation_text[:200] f.write(validation_text.encode(\"UTF-8\")) f.write('\\n'.encode(\"UTF-8\")) # print('\\t',",
"sanitize_text(summary) if len(validation_text) > 200: validation_text = validation_text[:200] f.write(validation_text.encode(\"UTF-8\")) f.write('\\n'.encode(\"UTF-8\")) # print('\\t', title,",
"'nl': [ 'https://www.telegraaf.nl/rss', 'https://www.ad.nl/nieuws/rss.xml' ], 'no': [ 'https://www.vg.no/rss/feed/forsiden/?format=rss', 'https://www.aftenposten.no/rss' ], 'pl': [ 'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml',",
"[ 'https://ukurier.gov.ua/uk/feed/', 'http://day.kyiv.ua/uk/news-rss.xml' ], 'vi': [ 'https://vnexpress.net/rss/tin-moi-nhat.rss', 'https://www.tienphong.vn/rss/ho-chi-minh-288.rss' ] } def text_from_html(html): return",
"f: for source in rss_sources[lang]: feed = feedparser.parse(source) items = feed.entries for item",
"' -> ', summary, ' -> ', validation_text) print(\"\\tfound\", len(items), \"feeds in\", source)",
"import rmtree from os import mkdir import feedparser from bs4 import BeautifulSoup as",
"return bs(html, \"lxml\").text if __name__ == '__main__': if isdir(VALIDATION_SET_DIR): user_input = input(\"Validation set",
"rss_sources[lang]: feed = feedparser.parse(source) items = feed.entries for item in items: title =",
"], 'fr': [ 'https://www.lemonde.fr/rss/une.xml', 'http://www.lefigaro.fr/rss/figaro_flash-actu.xml' ], 'hu': [ 'https://nepszava.hu/feed', 'https://www.vg.hu/feed/' ], 'it': [",
"text_from_html(html): return bs(html, \"lxml\").text if __name__ == '__main__': if isdir(VALIDATION_SET_DIR): user_input = input(\"Validation",
"lang), 'wb') as f: for source in rss_sources[lang]: feed = feedparser.parse(source) items =",
"items = feed.entries for item in items: title = text_from_html(item['title']) summary = text_from_html(item['summary'])",
"items: title = text_from_html(item['title']) summary = text_from_html(item['summary']) validation_text = sanitize_text(title) + ' '",
"], 'en': [ 'http://feeds.washingtonpost.com/rss/rss_powerpost', 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml' ], 'es': [ 'http://ep00.epimg.net/rss/elpais/portada.xml', 'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml' ], 'fi': [",
"to do.\") exit(0) else: print(\"Deleting old validate set dir\", VALIDATION_SET_DIR) rmtree(VALIDATION_SET_DIR) print(\"Creating new",
"'http://www.ansa.it/campania/notizie/campania_rss.xml' ], 'nl': [ 'https://www.telegraaf.nl/rss', 'https://www.ad.nl/nieuws/rss.xml' ], 'no': [ 'https://www.vg.no/rss/feed/forsiden/?format=rss', 'https://www.aftenposten.no/rss' ], 'pl':",
"in rss_sources: print(\"\\tSkipping\", lang, \"as there are no sources.\") continue with open(join_path(VALIDATION_SET_DIR, lang),",
"feedparser.parse(source) items = feed.entries for item in items: title = text_from_html(item['title']) summary =",
"], 'ru': [ 'https://www.mk.ru/rss/index.xml', 'https://iz.ru/xml/rss/all.xml' ], 'sv': [ 'https://www.di.se/rss', 'https://www.arbetarbladet.se/feed' ], 'uk': [",
"'https://ukurier.gov.ua/uk/feed/', 'http://day.kyiv.ua/uk/news-rss.xml' ], 'vi': [ 'https://vnexpress.net/rss/tin-moi-nhat.rss', 'https://www.tienphong.vn/rss/ho-chi-minh-288.rss' ] } def text_from_html(html): return bs(html,",
"'https://nepszava.hu/feed', 'https://www.vg.hu/feed/' ], 'it': [ 'https://www.fanpage.it/feed/', 'http://www.ansa.it/campania/notizie/campania_rss.xml' ], 'nl': [ 'https://www.telegraaf.nl/rss', 'https://www.ad.nl/nieuws/rss.xml' ],",
"'https://www.vg.hu/feed/' ], 'it': [ 'https://www.fanpage.it/feed/', 'http://www.ansa.it/campania/notizie/campania_rss.xml' ], 'nl': [ 'https://www.telegraaf.nl/rss', 'https://www.ad.nl/nieuws/rss.xml' ], 'no':",
"'http://www.spiegel.de/index.rss', 'https://www.faz.net/rss/aktuell/' ], 'en': [ 'http://feeds.washingtonpost.com/rss/rss_powerpost', 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml' ], 'es': [ 'http://ep00.epimg.net/rss/elpais/portada.xml', 'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml' ],",
"'https://www.rp.pl/rss/1019' ], 'pt': [ 'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml', 'http://feeds.jn.pt/JN-Nacional' ], 'ro': [ 'https://evz.ro/rss.xml', 'https://adevarul.ro/rss/' ], 'ru':",
"sources.\") continue with open(join_path(VALIDATION_SET_DIR, lang), 'wb') as f: for source in rss_sources[lang]: feed",
"lang in ['vi']: for lang in languages_names: print(lang) if lang not in rss_sources:",
"'http://ep00.epimg.net/rss/elpais/portada.xml', 'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml' ], 'fi': [ 'https://www.iltalehti.fi/rss/uutiset.xml', 'https://www.uusisuomi.fi/raha/feed' ], 'fr': [ 'https://www.lemonde.fr/rss/une.xml', 'http://www.lefigaro.fr/rss/figaro_flash-actu.xml' ],",
"'http://feeds.washingtonpost.com/rss/rss_powerpost', 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml' ], 'es': [ 'http://ep00.epimg.net/rss/elpais/portada.xml', 'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml' ], 'fi': [ 'https://www.iltalehti.fi/rss/uutiset.xml', 'https://www.uusisuomi.fi/raha/feed' ],",
"'__main__': if isdir(VALIDATION_SET_DIR): user_input = input(\"Validation set directory already exists, should delete it",
"'https://www.tienphong.vn/rss/ho-chi-minh-288.rss' ] } def text_from_html(html): return bs(html, \"lxml\").text if __name__ == '__main__': if",
"len(validation_text) > 200: validation_text = validation_text[:200] f.write(validation_text.encode(\"UTF-8\")) f.write('\\n'.encode(\"UTF-8\")) # print('\\t', title, ' ->",
"], 'fi': [ 'https://www.iltalehti.fi/rss/uutiset.xml', 'https://www.uusisuomi.fi/raha/feed' ], 'fr': [ 'https://www.lemonde.fr/rss/une.xml', 'http://www.lefigaro.fr/rss/figaro_flash-actu.xml' ], 'hu': [",
"def text_from_html(html): return bs(html, \"lxml\").text if __name__ == '__main__': if isdir(VALIDATION_SET_DIR): user_input =",
"feed.entries for item in items: title = text_from_html(item['title']) summary = text_from_html(item['summary']) validation_text =",
"[ 'https://www.telegraaf.nl/rss', 'https://www.ad.nl/nieuws/rss.xml' ], 'no': [ 'https://www.vg.no/rss/feed/forsiden/?format=rss', 'https://www.aftenposten.no/rss' ], 'pl': [ 'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml', 'https://www.rp.pl/rss/1019'",
"bs(html, \"lxml\").text if __name__ == '__main__': if isdir(VALIDATION_SET_DIR): user_input = input(\"Validation set directory",
"rss_sources: print(\"\\tSkipping\", lang, \"as there are no sources.\") continue with open(join_path(VALIDATION_SET_DIR, lang), 'wb')",
"from common import * from os.path import join as join_path, isdir from shutil",
"'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml' ], 'fi': [ 'https://www.iltalehti.fi/rss/uutiset.xml', 'https://www.uusisuomi.fi/raha/feed' ], 'fr': [ 'https://www.lemonde.fr/rss/une.xml', 'http://www.lefigaro.fr/rss/figaro_flash-actu.xml' ], 'hu':",
"text_from_html(item['title']) summary = text_from_html(item['summary']) validation_text = sanitize_text(title) + ' ' + sanitize_text(summary) if",
"], 'de': [ 'http://www.spiegel.de/index.rss', 'https://www.faz.net/rss/aktuell/' ], 'en': [ 'http://feeds.washingtonpost.com/rss/rss_powerpost', 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml' ], 'es': [",
"], 'sv': [ 'https://www.di.se/rss', 'https://www.arbetarbladet.se/feed' ], 'uk': [ 'https://ukurier.gov.ua/uk/feed/', 'http://day.kyiv.ua/uk/news-rss.xml' ], 'vi': [",
"and re-fetch the data? Y/N\\n\") if user_input.lower() != 'y': print(\"Nothing to do.\") exit(0)",
"Y/N\\n\") if user_input.lower() != 'y': print(\"Nothing to do.\") exit(0) else: print(\"Deleting old validate",
"it and re-fetch the data? Y/N\\n\") if user_input.lower() != 'y': print(\"Nothing to do.\")",
"VALIDATION_SET_DIR) rmtree(VALIDATION_SET_DIR) print(\"Creating new directory\", VALIDATION_SET_DIR) mkdir(VALIDATION_SET_DIR) # for lang in ['vi']: for",
"mkdir import feedparser from bs4 import BeautifulSoup as bs languages_names = [x['name'] for",
"[ 'https://www.iltalehti.fi/rss/uutiset.xml', 'https://www.uusisuomi.fi/raha/feed' ], 'fr': [ 'https://www.lemonde.fr/rss/une.xml', 'http://www.lefigaro.fr/rss/figaro_flash-actu.xml' ], 'hu': [ 'https://nepszava.hu/feed', 'https://www.vg.hu/feed/'",
"[ 'https://www.vg.no/rss/feed/forsiden/?format=rss', 'https://www.aftenposten.no/rss' ], 'pl': [ 'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml', 'https://www.rp.pl/rss/1019' ], 'pt': [ 'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml', 'http://feeds.jn.pt/JN-Nacional'",
"re-fetch the data? Y/N\\n\") if user_input.lower() != 'y': print(\"Nothing to do.\") exit(0) else:",
"lang in languages_names: print(lang) if lang not in rss_sources: print(\"\\tSkipping\", lang, \"as there",
"'https://www.aftenposten.no/rss' ], 'pl': [ 'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml', 'https://www.rp.pl/rss/1019' ], 'pt': [ 'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml', 'http://feeds.jn.pt/JN-Nacional' ], 'ro':",
"if __name__ == '__main__': if isdir(VALIDATION_SET_DIR): user_input = input(\"Validation set directory already exists,",
"= [x['name'] for x in languages] rss_sources = { 'da': [ 'https://politiken.dk/rss/senestenyt.rss', 'https://borsen.dk/rss/'",
"as join_path, isdir from shutil import rmtree from os import mkdir import feedparser",
"'https://www.faz.net/rss/aktuell/' ], 'en': [ 'http://feeds.washingtonpost.com/rss/rss_powerpost', 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml' ], 'es': [ 'http://ep00.epimg.net/rss/elpais/portada.xml', 'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml' ], 'fi':",
"if lang not in rss_sources: print(\"\\tSkipping\", lang, \"as there are no sources.\") continue",
"new directory\", VALIDATION_SET_DIR) mkdir(VALIDATION_SET_DIR) # for lang in ['vi']: for lang in languages_names:",
"!= 'y': print(\"Nothing to do.\") exit(0) else: print(\"Deleting old validate set dir\", VALIDATION_SET_DIR)",
"[ 'https://politiken.dk/rss/senestenyt.rss', 'https://borsen.dk/rss/' ], 'de': [ 'http://www.spiegel.de/index.rss', 'https://www.faz.net/rss/aktuell/' ], 'en': [ 'http://feeds.washingtonpost.com/rss/rss_powerpost', 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml'",
"[ 'https://nepszava.hu/feed', 'https://www.vg.hu/feed/' ], 'it': [ 'https://www.fanpage.it/feed/', 'http://www.ansa.it/campania/notizie/campania_rss.xml' ], 'nl': [ 'https://www.telegraaf.nl/rss', 'https://www.ad.nl/nieuws/rss.xml'",
"'http://www.lefigaro.fr/rss/figaro_flash-actu.xml' ], 'hu': [ 'https://nepszava.hu/feed', 'https://www.vg.hu/feed/' ], 'it': [ 'https://www.fanpage.it/feed/', 'http://www.ansa.it/campania/notizie/campania_rss.xml' ], 'nl':",
"validation_text = sanitize_text(title) + ' ' + sanitize_text(summary) if len(validation_text) > 200: validation_text",
"['vi']: for lang in languages_names: print(lang) if lang not in rss_sources: print(\"\\tSkipping\", lang,",
"* from os.path import join as join_path, isdir from shutil import rmtree from",
"join_path, isdir from shutil import rmtree from os import mkdir import feedparser from",
"title = text_from_html(item['title']) summary = text_from_html(item['summary']) validation_text = sanitize_text(title) + ' ' +",
"print(lang) if lang not in rss_sources: print(\"\\tSkipping\", lang, \"as there are no sources.\")",
"lang not in rss_sources: print(\"\\tSkipping\", lang, \"as there are no sources.\") continue with",
"open(join_path(VALIDATION_SET_DIR, lang), 'wb') as f: for source in rss_sources[lang]: feed = feedparser.parse(source) items",
"from bs4 import BeautifulSoup as bs languages_names = [x['name'] for x in languages]",
"isdir(VALIDATION_SET_DIR): user_input = input(\"Validation set directory already exists, should delete it and re-fetch",
"from shutil import rmtree from os import mkdir import feedparser from bs4 import",
"validation_text = validation_text[:200] f.write(validation_text.encode(\"UTF-8\")) f.write('\\n'.encode(\"UTF-8\")) # print('\\t', title, ' -> ', summary, '",
"[ 'https://www.di.se/rss', 'https://www.arbetarbladet.se/feed' ], 'uk': [ 'https://ukurier.gov.ua/uk/feed/', 'http://day.kyiv.ua/uk/news-rss.xml' ], 'vi': [ 'https://vnexpress.net/rss/tin-moi-nhat.rss', 'https://www.tienphong.vn/rss/ho-chi-minh-288.rss'",
"print(\"Deleting old validate set dir\", VALIDATION_SET_DIR) rmtree(VALIDATION_SET_DIR) print(\"Creating new directory\", VALIDATION_SET_DIR) mkdir(VALIDATION_SET_DIR) #",
"f.write('\\n'.encode(\"UTF-8\")) # print('\\t', title, ' -> ', summary, ' -> ', validation_text) print(\"\\tfound\",",
"import feedparser from bs4 import BeautifulSoup as bs languages_names = [x['name'] for x",
"], 'pt': [ 'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml', 'http://feeds.jn.pt/JN-Nacional' ], 'ro': [ 'https://evz.ro/rss.xml', 'https://adevarul.ro/rss/' ], 'ru': [",
"'vi': [ 'https://vnexpress.net/rss/tin-moi-nhat.rss', 'https://www.tienphong.vn/rss/ho-chi-minh-288.rss' ] } def text_from_html(html): return bs(html, \"lxml\").text if __name__",
"'https://www.telegraaf.nl/rss', 'https://www.ad.nl/nieuws/rss.xml' ], 'no': [ 'https://www.vg.no/rss/feed/forsiden/?format=rss', 'https://www.aftenposten.no/rss' ], 'pl': [ 'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml', 'https://www.rp.pl/rss/1019' ],",
"'it': [ 'https://www.fanpage.it/feed/', 'http://www.ansa.it/campania/notizie/campania_rss.xml' ], 'nl': [ 'https://www.telegraaf.nl/rss', 'https://www.ad.nl/nieuws/rss.xml' ], 'no': [ 'https://www.vg.no/rss/feed/forsiden/?format=rss',",
"join as join_path, isdir from shutil import rmtree from os import mkdir import",
"], 'uk': [ 'https://ukurier.gov.ua/uk/feed/', 'http://day.kyiv.ua/uk/news-rss.xml' ], 'vi': [ 'https://vnexpress.net/rss/tin-moi-nhat.rss', 'https://www.tienphong.vn/rss/ho-chi-minh-288.rss' ] } def",
"in languages_names: print(lang) if lang not in rss_sources: print(\"\\tSkipping\", lang, \"as there are",
"sanitize_text(title) + ' ' + sanitize_text(summary) if len(validation_text) > 200: validation_text = validation_text[:200]",
"'https://www.mk.ru/rss/index.xml', 'https://iz.ru/xml/rss/all.xml' ], 'sv': [ 'https://www.di.se/rss', 'https://www.arbetarbladet.se/feed' ], 'uk': [ 'https://ukurier.gov.ua/uk/feed/', 'http://day.kyiv.ua/uk/news-rss.xml' ],",
"} def text_from_html(html): return bs(html, \"lxml\").text if __name__ == '__main__': if isdir(VALIDATION_SET_DIR): user_input",
"'uk': [ 'https://ukurier.gov.ua/uk/feed/', 'http://day.kyiv.ua/uk/news-rss.xml' ], 'vi': [ 'https://vnexpress.net/rss/tin-moi-nhat.rss', 'https://www.tienphong.vn/rss/ho-chi-minh-288.rss' ] } def text_from_html(html):",
"in rss_sources[lang]: feed = feedparser.parse(source) items = feed.entries for item in items: title",
"], 'it': [ 'https://www.fanpage.it/feed/', 'http://www.ansa.it/campania/notizie/campania_rss.xml' ], 'nl': [ 'https://www.telegraaf.nl/rss', 'https://www.ad.nl/nieuws/rss.xml' ], 'no': [",
"if user_input.lower() != 'y': print(\"Nothing to do.\") exit(0) else: print(\"Deleting old validate set",
"' ' + sanitize_text(summary) if len(validation_text) > 200: validation_text = validation_text[:200] f.write(validation_text.encode(\"UTF-8\")) f.write('\\n'.encode(\"UTF-8\"))",
"= feedparser.parse(source) items = feed.entries for item in items: title = text_from_html(item['title']) summary",
"BeautifulSoup as bs languages_names = [x['name'] for x in languages] rss_sources = {",
"feed = feedparser.parse(source) items = feed.entries for item in items: title = text_from_html(item['title'])",
"'https://politiken.dk/rss/senestenyt.rss', 'https://borsen.dk/rss/' ], 'de': [ 'http://www.spiegel.de/index.rss', 'https://www.faz.net/rss/aktuell/' ], 'en': [ 'http://feeds.washingtonpost.com/rss/rss_powerpost', 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml' ],",
"summary = text_from_html(item['summary']) validation_text = sanitize_text(title) + ' ' + sanitize_text(summary) if len(validation_text)",
"\"lxml\").text if __name__ == '__main__': if isdir(VALIDATION_SET_DIR): user_input = input(\"Validation set directory already",
"'da': [ 'https://politiken.dk/rss/senestenyt.rss', 'https://borsen.dk/rss/' ], 'de': [ 'http://www.spiegel.de/index.rss', 'https://www.faz.net/rss/aktuell/' ], 'en': [ 'http://feeds.washingtonpost.com/rss/rss_powerpost',",
"== '__main__': if isdir(VALIDATION_SET_DIR): user_input = input(\"Validation set directory already exists, should delete",
"for item in items: title = text_from_html(item['title']) summary = text_from_html(item['summary']) validation_text = sanitize_text(title)",
"input(\"Validation set directory already exists, should delete it and re-fetch the data? Y/N\\n\")",
"'ro': [ 'https://evz.ro/rss.xml', 'https://adevarul.ro/rss/' ], 'ru': [ 'https://www.mk.ru/rss/index.xml', 'https://iz.ru/xml/rss/all.xml' ], 'sv': [ 'https://www.di.se/rss',",
"should delete it and re-fetch the data? Y/N\\n\") if user_input.lower() != 'y': print(\"Nothing",
"import mkdir import feedparser from bs4 import BeautifulSoup as bs languages_names = [x['name']",
"[ 'https://evz.ro/rss.xml', 'https://adevarul.ro/rss/' ], 'ru': [ 'https://www.mk.ru/rss/index.xml', 'https://iz.ru/xml/rss/all.xml' ], 'sv': [ 'https://www.di.se/rss', 'https://www.arbetarbladet.se/feed'",
"print(\"Nothing to do.\") exit(0) else: print(\"Deleting old validate set dir\", VALIDATION_SET_DIR) rmtree(VALIDATION_SET_DIR) print(\"Creating",
"'https://www.arbetarbladet.se/feed' ], 'uk': [ 'https://ukurier.gov.ua/uk/feed/', 'http://day.kyiv.ua/uk/news-rss.xml' ], 'vi': [ 'https://vnexpress.net/rss/tin-moi-nhat.rss', 'https://www.tienphong.vn/rss/ho-chi-minh-288.rss' ] }",
"lang, \"as there are no sources.\") continue with open(join_path(VALIDATION_SET_DIR, lang), 'wb') as f:",
"] } def text_from_html(html): return bs(html, \"lxml\").text if __name__ == '__main__': if isdir(VALIDATION_SET_DIR):",
"'fr': [ 'https://www.lemonde.fr/rss/une.xml', 'http://www.lefigaro.fr/rss/figaro_flash-actu.xml' ], 'hu': [ 'https://nepszava.hu/feed', 'https://www.vg.hu/feed/' ], 'it': [ 'https://www.fanpage.it/feed/',",
"[x['name'] for x in languages] rss_sources = { 'da': [ 'https://politiken.dk/rss/senestenyt.rss', 'https://borsen.dk/rss/' ],",
"= input(\"Validation set directory already exists, should delete it and re-fetch the data?",
"set directory already exists, should delete it and re-fetch the data? Y/N\\n\") if",
"the data? Y/N\\n\") if user_input.lower() != 'y': print(\"Nothing to do.\") exit(0) else: print(\"Deleting",
"VALIDATION_SET_DIR) mkdir(VALIDATION_SET_DIR) # for lang in ['vi']: for lang in languages_names: print(lang) if",
"for lang in languages_names: print(lang) if lang not in rss_sources: print(\"\\tSkipping\", lang, \"as",
"\"as there are no sources.\") continue with open(join_path(VALIDATION_SET_DIR, lang), 'wb') as f: for",
"[ 'http://feeds.washingtonpost.com/rss/rss_powerpost', 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml' ], 'es': [ 'http://ep00.epimg.net/rss/elpais/portada.xml', 'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml' ], 'fi': [ 'https://www.iltalehti.fi/rss/uutiset.xml', 'https://www.uusisuomi.fi/raha/feed'",
"as f: for source in rss_sources[lang]: feed = feedparser.parse(source) items = feed.entries for",
"import join as join_path, isdir from shutil import rmtree from os import mkdir",
"rmtree from os import mkdir import feedparser from bs4 import BeautifulSoup as bs",
"= text_from_html(item['title']) summary = text_from_html(item['summary']) validation_text = sanitize_text(title) + ' ' + sanitize_text(summary)",
"= sanitize_text(title) + ' ' + sanitize_text(summary) if len(validation_text) > 200: validation_text =",
"= text_from_html(item['summary']) validation_text = sanitize_text(title) + ' ' + sanitize_text(summary) if len(validation_text) >",
"from os.path import join as join_path, isdir from shutil import rmtree from os",
"isdir from shutil import rmtree from os import mkdir import feedparser from bs4",
"'pl': [ 'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml', 'https://www.rp.pl/rss/1019' ], 'pt': [ 'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml', 'http://feeds.jn.pt/JN-Nacional' ], 'ro': [ 'https://evz.ro/rss.xml',",
"'https://evz.ro/rss.xml', 'https://adevarul.ro/rss/' ], 'ru': [ 'https://www.mk.ru/rss/index.xml', 'https://iz.ru/xml/rss/all.xml' ], 'sv': [ 'https://www.di.se/rss', 'https://www.arbetarbladet.se/feed' ],",
"'no': [ 'https://www.vg.no/rss/feed/forsiden/?format=rss', 'https://www.aftenposten.no/rss' ], 'pl': [ 'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml', 'https://www.rp.pl/rss/1019' ], 'pt': [ 'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml',",
"in items: title = text_from_html(item['title']) summary = text_from_html(item['summary']) validation_text = sanitize_text(title) + '",
"from os import mkdir import feedparser from bs4 import BeautifulSoup as bs languages_names",
"bs languages_names = [x['name'] for x in languages] rss_sources = { 'da': [",
"delete it and re-fetch the data? Y/N\\n\") if user_input.lower() != 'y': print(\"Nothing to",
"' + sanitize_text(summary) if len(validation_text) > 200: validation_text = validation_text[:200] f.write(validation_text.encode(\"UTF-8\")) f.write('\\n'.encode(\"UTF-8\")) #",
"= validation_text[:200] f.write(validation_text.encode(\"UTF-8\")) f.write('\\n'.encode(\"UTF-8\")) # print('\\t', title, ' -> ', summary, ' ->",
"], 'ro': [ 'https://evz.ro/rss.xml', 'https://adevarul.ro/rss/' ], 'ru': [ 'https://www.mk.ru/rss/index.xml', 'https://iz.ru/xml/rss/all.xml' ], 'sv': [",
"text_from_html(item['summary']) validation_text = sanitize_text(title) + ' ' + sanitize_text(summary) if len(validation_text) > 200:",
"rss_sources = { 'da': [ 'https://politiken.dk/rss/senestenyt.rss', 'https://borsen.dk/rss/' ], 'de': [ 'http://www.spiegel.de/index.rss', 'https://www.faz.net/rss/aktuell/' ],",
"'https://borsen.dk/rss/' ], 'de': [ 'http://www.spiegel.de/index.rss', 'https://www.faz.net/rss/aktuell/' ], 'en': [ 'http://feeds.washingtonpost.com/rss/rss_powerpost', 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml' ], 'es':",
"mkdir(VALIDATION_SET_DIR) # for lang in ['vi']: for lang in languages_names: print(lang) if lang",
"source in rss_sources[lang]: feed = feedparser.parse(source) items = feed.entries for item in items:",
"'pt': [ 'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml', 'http://feeds.jn.pt/JN-Nacional' ], 'ro': [ 'https://evz.ro/rss.xml', 'https://adevarul.ro/rss/' ], 'ru': [ 'https://www.mk.ru/rss/index.xml',",
"bs4 import BeautifulSoup as bs languages_names = [x['name'] for x in languages] rss_sources",
"'https://www.fanpage.it/feed/', 'http://www.ansa.it/campania/notizie/campania_rss.xml' ], 'nl': [ 'https://www.telegraaf.nl/rss', 'https://www.ad.nl/nieuws/rss.xml' ], 'no': [ 'https://www.vg.no/rss/feed/forsiden/?format=rss', 'https://www.aftenposten.no/rss' ],",
"import * from os.path import join as join_path, isdir from shutil import rmtree",
"'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml', 'http://feeds.jn.pt/JN-Nacional' ], 'ro': [ 'https://evz.ro/rss.xml', 'https://adevarul.ro/rss/' ], 'ru': [ 'https://www.mk.ru/rss/index.xml', 'https://iz.ru/xml/rss/all.xml' ],",
"with open(join_path(VALIDATION_SET_DIR, lang), 'wb') as f: for source in rss_sources[lang]: feed = feedparser.parse(source)",
"'en': [ 'http://feeds.washingtonpost.com/rss/rss_powerpost', 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml' ], 'es': [ 'http://ep00.epimg.net/rss/elpais/portada.xml', 'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml' ], 'fi': [ 'https://www.iltalehti.fi/rss/uutiset.xml',",
"directory already exists, should delete it and re-fetch the data? Y/N\\n\") if user_input.lower()",
"continue with open(join_path(VALIDATION_SET_DIR, lang), 'wb') as f: for source in rss_sources[lang]: feed =",
"print('\\t', title, ' -> ', summary, ' -> ', validation_text) print(\"\\tfound\", len(items), \"feeds",
"= feed.entries for item in items: title = text_from_html(item['title']) summary = text_from_html(item['summary']) validation_text",
"title, ' -> ', summary, ' -> ', validation_text) print(\"\\tfound\", len(items), \"feeds in\",",
"set dir\", VALIDATION_SET_DIR) rmtree(VALIDATION_SET_DIR) print(\"Creating new directory\", VALIDATION_SET_DIR) mkdir(VALIDATION_SET_DIR) # for lang in",
"for x in languages] rss_sources = { 'da': [ 'https://politiken.dk/rss/senestenyt.rss', 'https://borsen.dk/rss/' ], 'de':",
"in languages] rss_sources = { 'da': [ 'https://politiken.dk/rss/senestenyt.rss', 'https://borsen.dk/rss/' ], 'de': [ 'http://www.spiegel.de/index.rss',",
"for source in rss_sources[lang]: feed = feedparser.parse(source) items = feed.entries for item in",
"item in items: title = text_from_html(item['title']) summary = text_from_html(item['summary']) validation_text = sanitize_text(title) +",
"> 200: validation_text = validation_text[:200] f.write(validation_text.encode(\"UTF-8\")) f.write('\\n'.encode(\"UTF-8\")) # print('\\t', title, ' -> ',",
"'http://feeds.jn.pt/JN-Nacional' ], 'ro': [ 'https://evz.ro/rss.xml', 'https://adevarul.ro/rss/' ], 'ru': [ 'https://www.mk.ru/rss/index.xml', 'https://iz.ru/xml/rss/all.xml' ], 'sv':",
"'https://vnexpress.net/rss/tin-moi-nhat.rss', 'https://www.tienphong.vn/rss/ho-chi-minh-288.rss' ] } def text_from_html(html): return bs(html, \"lxml\").text if __name__ == '__main__':",
"in ['vi']: for lang in languages_names: print(lang) if lang not in rss_sources: print(\"\\tSkipping\",",
"200: validation_text = validation_text[:200] f.write(validation_text.encode(\"UTF-8\")) f.write('\\n'.encode(\"UTF-8\")) # print('\\t', title, ' -> ', summary,",
"[ 'https://www.fanpage.it/feed/', 'http://www.ansa.it/campania/notizie/campania_rss.xml' ], 'nl': [ 'https://www.telegraaf.nl/rss', 'https://www.ad.nl/nieuws/rss.xml' ], 'no': [ 'https://www.vg.no/rss/feed/forsiden/?format=rss', 'https://www.aftenposten.no/rss'",
"else: print(\"Deleting old validate set dir\", VALIDATION_SET_DIR) rmtree(VALIDATION_SET_DIR) print(\"Creating new directory\", VALIDATION_SET_DIR) mkdir(VALIDATION_SET_DIR)",
"f.write(validation_text.encode(\"UTF-8\")) f.write('\\n'.encode(\"UTF-8\")) # print('\\t', title, ' -> ', summary, ' -> ', validation_text)",
"'de': [ 'http://www.spiegel.de/index.rss', 'https://www.faz.net/rss/aktuell/' ], 'en': [ 'http://feeds.washingtonpost.com/rss/rss_powerpost', 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml' ], 'es': [ 'http://ep00.epimg.net/rss/elpais/portada.xml',",
"'https://www.vg.no/rss/feed/forsiden/?format=rss', 'https://www.aftenposten.no/rss' ], 'pl': [ 'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml', 'https://www.rp.pl/rss/1019' ], 'pt': [ 'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml', 'http://feeds.jn.pt/JN-Nacional' ],",
"], 'pl': [ 'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml', 'https://www.rp.pl/rss/1019' ], 'pt': [ 'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml', 'http://feeds.jn.pt/JN-Nacional' ], 'ro': [",
"shutil import rmtree from os import mkdir import feedparser from bs4 import BeautifulSoup",
"exists, should delete it and re-fetch the data? Y/N\\n\") if user_input.lower() != 'y':",
"print(\"Creating new directory\", VALIDATION_SET_DIR) mkdir(VALIDATION_SET_DIR) # for lang in ['vi']: for lang in",
"user_input = input(\"Validation set directory already exists, should delete it and re-fetch the",
"[ 'https://www.lemonde.fr/rss/une.xml', 'http://www.lefigaro.fr/rss/figaro_flash-actu.xml' ], 'hu': [ 'https://nepszava.hu/feed', 'https://www.vg.hu/feed/' ], 'it': [ 'https://www.fanpage.it/feed/', 'http://www.ansa.it/campania/notizie/campania_rss.xml'",
"if isdir(VALIDATION_SET_DIR): user_input = input(\"Validation set directory already exists, should delete it and",
"'y': print(\"Nothing to do.\") exit(0) else: print(\"Deleting old validate set dir\", VALIDATION_SET_DIR) rmtree(VALIDATION_SET_DIR)",
"feedparser from bs4 import BeautifulSoup as bs languages_names = [x['name'] for x in",
"+ ' ' + sanitize_text(summary) if len(validation_text) > 200: validation_text = validation_text[:200] f.write(validation_text.encode(\"UTF-8\"))",
"directory\", VALIDATION_SET_DIR) mkdir(VALIDATION_SET_DIR) # for lang in ['vi']: for lang in languages_names: print(lang)",
"'wb') as f: for source in rss_sources[lang]: feed = feedparser.parse(source) items = feed.entries",
"[ 'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml', 'https://www.rp.pl/rss/1019' ], 'pt': [ 'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml', 'http://feeds.jn.pt/JN-Nacional' ], 'ro': [ 'https://evz.ro/rss.xml', 'https://adevarul.ro/rss/'",
"there are no sources.\") continue with open(join_path(VALIDATION_SET_DIR, lang), 'wb') as f: for source",
"__name__ == '__main__': if isdir(VALIDATION_SET_DIR): user_input = input(\"Validation set directory already exists, should",
"languages_names = [x['name'] for x in languages] rss_sources = { 'da': [ 'https://politiken.dk/rss/senestenyt.rss',",
"languages] rss_sources = { 'da': [ 'https://politiken.dk/rss/senestenyt.rss', 'https://borsen.dk/rss/' ], 'de': [ 'http://www.spiegel.de/index.rss', 'https://www.faz.net/rss/aktuell/'",
"'es': [ 'http://ep00.epimg.net/rss/elpais/portada.xml', 'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml' ], 'fi': [ 'https://www.iltalehti.fi/rss/uutiset.xml', 'https://www.uusisuomi.fi/raha/feed' ], 'fr': [ 'https://www.lemonde.fr/rss/une.xml',",
"], 'es': [ 'http://ep00.epimg.net/rss/elpais/portada.xml', 'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml' ], 'fi': [ 'https://www.iltalehti.fi/rss/uutiset.xml', 'https://www.uusisuomi.fi/raha/feed' ], 'fr': [",
"'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml', 'https://www.rp.pl/rss/1019' ], 'pt': [ 'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml', 'http://feeds.jn.pt/JN-Nacional' ], 'ro': [ 'https://evz.ro/rss.xml', 'https://adevarul.ro/rss/' ],",
"languages_names: print(lang) if lang not in rss_sources: print(\"\\tSkipping\", lang, \"as there are no",
"are no sources.\") continue with open(join_path(VALIDATION_SET_DIR, lang), 'wb') as f: for source in",
"print(\"\\tSkipping\", lang, \"as there are no sources.\") continue with open(join_path(VALIDATION_SET_DIR, lang), 'wb') as",
"do.\") exit(0) else: print(\"Deleting old validate set dir\", VALIDATION_SET_DIR) rmtree(VALIDATION_SET_DIR) print(\"Creating new directory\",",
"'https://www.uusisuomi.fi/raha/feed' ], 'fr': [ 'https://www.lemonde.fr/rss/une.xml', 'http://www.lefigaro.fr/rss/figaro_flash-actu.xml' ], 'hu': [ 'https://nepszava.hu/feed', 'https://www.vg.hu/feed/' ], 'it':",
"], 'hu': [ 'https://nepszava.hu/feed', 'https://www.vg.hu/feed/' ], 'it': [ 'https://www.fanpage.it/feed/', 'http://www.ansa.it/campania/notizie/campania_rss.xml' ], 'nl': [",
"'ru': [ 'https://www.mk.ru/rss/index.xml', 'https://iz.ru/xml/rss/all.xml' ], 'sv': [ 'https://www.di.se/rss', 'https://www.arbetarbladet.se/feed' ], 'uk': [ 'https://ukurier.gov.ua/uk/feed/',",
"common import * from os.path import join as join_path, isdir from shutil import",
"'hu': [ 'https://nepszava.hu/feed', 'https://www.vg.hu/feed/' ], 'it': [ 'https://www.fanpage.it/feed/', 'http://www.ansa.it/campania/notizie/campania_rss.xml' ], 'nl': [ 'https://www.telegraaf.nl/rss',",
"rmtree(VALIDATION_SET_DIR) print(\"Creating new directory\", VALIDATION_SET_DIR) mkdir(VALIDATION_SET_DIR) # for lang in ['vi']: for lang",
"exit(0) else: print(\"Deleting old validate set dir\", VALIDATION_SET_DIR) rmtree(VALIDATION_SET_DIR) print(\"Creating new directory\", VALIDATION_SET_DIR)",
"[ 'https://www.mk.ru/rss/index.xml', 'https://iz.ru/xml/rss/all.xml' ], 'sv': [ 'https://www.di.se/rss', 'https://www.arbetarbladet.se/feed' ], 'uk': [ 'https://ukurier.gov.ua/uk/feed/', 'http://day.kyiv.ua/uk/news-rss.xml'",
"'sv': [ 'https://www.di.se/rss', 'https://www.arbetarbladet.se/feed' ], 'uk': [ 'https://ukurier.gov.ua/uk/feed/', 'http://day.kyiv.ua/uk/news-rss.xml' ], 'vi': [ 'https://vnexpress.net/rss/tin-moi-nhat.rss',",
"user_input.lower() != 'y': print(\"Nothing to do.\") exit(0) else: print(\"Deleting old validate set dir\",",
"old validate set dir\", VALIDATION_SET_DIR) rmtree(VALIDATION_SET_DIR) print(\"Creating new directory\", VALIDATION_SET_DIR) mkdir(VALIDATION_SET_DIR) # for",
"], 'no': [ 'https://www.vg.no/rss/feed/forsiden/?format=rss', 'https://www.aftenposten.no/rss' ], 'pl': [ 'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml', 'https://www.rp.pl/rss/1019' ], 'pt': [",
"'https://adevarul.ro/rss/' ], 'ru': [ 'https://www.mk.ru/rss/index.xml', 'https://iz.ru/xml/rss/all.xml' ], 'sv': [ 'https://www.di.se/rss', 'https://www.arbetarbladet.se/feed' ], 'uk':",
"as bs languages_names = [x['name'] for x in languages] rss_sources = { 'da':",
"os import mkdir import feedparser from bs4 import BeautifulSoup as bs languages_names =",
"'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml' ], 'es': [ 'http://ep00.epimg.net/rss/elpais/portada.xml', 'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml' ], 'fi': [ 'https://www.iltalehti.fi/rss/uutiset.xml', 'https://www.uusisuomi.fi/raha/feed' ], 'fr':",
"{ 'da': [ 'https://politiken.dk/rss/senestenyt.rss', 'https://borsen.dk/rss/' ], 'de': [ 'http://www.spiegel.de/index.rss', 'https://www.faz.net/rss/aktuell/' ], 'en': [",
"[ 'http://www.spiegel.de/index.rss', 'https://www.faz.net/rss/aktuell/' ], 'en': [ 'http://feeds.washingtonpost.com/rss/rss_powerpost', 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml' ], 'es': [ 'http://ep00.epimg.net/rss/elpais/portada.xml', 'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml'",
"[ 'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml', 'http://feeds.jn.pt/JN-Nacional' ], 'ro': [ 'https://evz.ro/rss.xml', 'https://adevarul.ro/rss/' ], 'ru': [ 'https://www.mk.ru/rss/index.xml', 'https://iz.ru/xml/rss/all.xml'",
"[ 'http://ep00.epimg.net/rss/elpais/portada.xml', 'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml' ], 'fi': [ 'https://www.iltalehti.fi/rss/uutiset.xml', 'https://www.uusisuomi.fi/raha/feed' ], 'fr': [ 'https://www.lemonde.fr/rss/une.xml', 'http://www.lefigaro.fr/rss/figaro_flash-actu.xml'",
"'https://iz.ru/xml/rss/all.xml' ], 'sv': [ 'https://www.di.se/rss', 'https://www.arbetarbladet.se/feed' ], 'uk': [ 'https://ukurier.gov.ua/uk/feed/', 'http://day.kyiv.ua/uk/news-rss.xml' ], 'vi':",
"no sources.\") continue with open(join_path(VALIDATION_SET_DIR, lang), 'wb') as f: for source in rss_sources[lang]:"
] |
[
"type=int, default=64, help='batch size') parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate') parser.add_argument('--dropout', type=float, default=0.3, help='dropout",
"# g['lr'] = lr train_loss = [] train_mape = [] train_rmse = []",
"def main(): # set seed # torch.manual_seed(args.seed) # np.random.seed(args.seed) # load data device",
"for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3)",
"Train RMSE: {:.4f}' print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1])) t2 = time.time() train_time.append(t2 - t1)",
"args.batch_size, args.batch_size, args.batch_size) scaler = dataloader['scaler'] supports = [torch.tensor(i).to(device) for i in adj_mx]",
"outputs.append(preds.squeeze()) yhat = torch.cat(outputs, dim=0) yhat = yhat[:realy.size(0), ...] print(\"Training finished\") print(\"The valid",
"mvalid_loss = np.mean(valid_loss) mvalid_mape = np.mean(valid_mape) mvalid_rmse = np.mean(valid_rmse) his_loss.append(mvalid_loss) log = 'Epoch:",
"torch.no_grad(): preds = engine.model(testx).transpose(1, 3) outputs.append(preds.squeeze()) yhat = torch.cat(outputs, dim=0) yhat = yhat[:realy.size(0),",
"\"_exp\" + str(args.expid) + \"_best_\" + str(round(his_loss[bestid], 2)) + \".pth\") if __name__ ==",
"for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(i",
"mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1))) torch.save(engine.model.state_dict(), args.save + \"_epoch_\" +",
"= trainy.transpose(1, 3) metrics = engine.train(trainx, trainy[:, 0, :, :]) train_loss.append(metrics[0]) train_mape.append(metrics[1]) train_rmse.append(metrics[2])",
"{:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(np.mean(amae), np.mean(amape), np.mean(armse))) torch.save(engine.model.state_dict(), args.save +",
"trainx.transpose(1, 3) trainy = torch.Tensor(y).to(device) trainy = trainy.transpose(1, 3) metrics = engine.train(trainx, trainy[:,",
"print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1))) torch.save(engine.model.state_dict(), args.save +",
"** (i // 10))) # for g in engine.optimizer.param_groups: # g['lr'] = lr",
"if iter % args.print_every == 0: log = 'Iter: {:03d}, Train Loss: {:.4f},",
"{:.4f} secs' print(log.format(i, (s2 - s1))) val_time.append(s2 - s1) mtrain_loss = np.mean(train_loss) mtrain_mape",
"parser.add_argument('--expid', type=int, default=1, help='experiment id') args = parser.parse_args() def main(): # set seed",
"Inference Time: {:.4f} secs\".format(np.mean(val_time))) # testing bestid = np.argmin(his_loss) engine.model.load_state_dict( torch.load(args.save + \"_epoch_\"",
"# if i % 10 == 0: # lr = max(0.000002,args.learning_rate * (0.1",
"+ str(args.expid) + \"_best_\" + str(round(his_loss[bestid], 2)) + \".pth\") if __name__ == \"__main__\":",
"+ \"_exp\" + str(args.expid) + \"_best_\" + str(round(his_loss[bestid], 2)) + \".pth\") if __name__",
"scaler = dataloader['scaler'] supports = [torch.tensor(i).to(device) for i in adj_mx] print(args) if args.randomadj:",
"test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE:",
"secs' print(log.format(i, (s2 - s1))) val_time.append(s2 - s1) mtrain_loss = np.mean(train_loss) mtrain_mape =",
"supports = None engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout, args.learning_rate, args.weight_decay,",
"parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate') parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate') parser.add_argument('--weight_decay', type=float, default=0.0001,",
"+ \".pth\")) outputs = [] realy = torch.Tensor(dataloader['y_test']).to(device) realy = realy.transpose(1, 3)[:, 0,",
"testing bestid = np.argmin(his_loss) engine.model.load_state_dict( torch.load(args.save + \"_epoch_\" + str(bestid + 1) +",
"valid_rmse.append(metrics[2]) s2 = time.time() log = 'Epoch: {:03d}, Inference Time: {:.4f} secs' print(log.format(i,",
"[] train_rmse = [] t1 = time.time() dataloader['train_loader'].shuffle() for iter, (x, y) in",
"= [] val_time = [] train_time = [] for i in range(1, args.epochs",
"= np.mean(train_loss) mtrain_mape = np.mean(train_mape) mtrain_rmse = np.mean(train_rmse) mvalid_loss = np.mean(valid_loss) mvalid_mape =",
"default=0.001, help='learning rate') parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate') parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay",
"[] train_time = [] for i in range(1, args.epochs + 1): # if",
"10))) # for g in engine.optimizer.param_groups: # g['lr'] = lr train_loss = []",
"convolution layer') parser.add_argument('--aptonly', action='store_true', help='whether only adaptive adj') parser.add_argument('--addaptadj', action='store_true', help='whether add adaptive",
"add graph convolution layer') parser.add_argument('--aptonly', action='store_true', help='whether only adaptive adj') parser.add_argument('--addaptadj', action='store_true', help='whether",
"{:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}' print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1])) t2 =",
"type=int, default=50, help='') parser.add_argument('--print_every', type=int, default=50, help='') # parser.add_argument('--seed',type=int,default=99,help='random seed') parser.add_argument('--save', type=str, default='./garage/metr',",
"= 'Epoch: {:03d}, Inference Time: {:.4f} secs' print(log.format(i, (s2 - s1))) val_time.append(s2 -",
"+ \"_\" + str(round(mvalid_loss, 2)) + \".pth\") print(\"Average Training Time: {:.4f} secs/epoch\".format(np.mean(train_time))) print(\"Average",
"in range(1, args.epochs + 1): # if i % 10 == 0: #",
":, :]) train_loss.append(metrics[0]) train_mape.append(metrics[1]) train_rmse.append(metrics[2]) if iter % args.print_every == 0: log =",
"time.time() train_time.append(t2 - t1) # validation valid_loss = [] valid_mape = [] valid_rmse",
"as np import argparse import time import util import matplotlib.pyplot as plt from",
"train_rmse[-1])) t2 = time.time() train_time.append(t2 - t1) # validation valid_loss = [] valid_mape",
"- s1))) val_time.append(s2 - s1) mtrain_loss = np.mean(train_loss) mtrain_mape = np.mean(train_mape) mtrain_rmse =",
"type=int, default=2, help='inputs dimension') parser.add_argument('--num_nodes', type=int, default=207, help='number of nodes') parser.add_argument('--batch_size', type=int, default=64,",
"help='whether to add graph convolution layer') parser.add_argument('--aptonly', action='store_true', help='whether only adaptive adj') parser.add_argument('--addaptadj',",
"torch.Tensor(dataloader['y_test']).to(device) realy = realy.transpose(1, 3)[:, 0, :, :] for iter, (x, y) in",
"mtrain_loss = np.mean(train_loss) mtrain_mape = np.mean(train_mape) mtrain_rmse = np.mean(train_rmse) mvalid_loss = np.mean(valid_loss) mvalid_mape",
"as plt from engine import trainer parser = argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda:0', help='')",
"Inference Time: {:.4f} secs' print(log.format(i, (s2 - s1))) val_time.append(s2 - s1) mtrain_loss =",
"Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid",
"testy = torch.Tensor(y).to(device) testy = testy.transpose(1, 3) metrics = engine.eval(testx, testy[:, 0, :,",
"default=12, help='') parser.add_argument('--nhid', type=int, default=32, help='') parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension') parser.add_argument('--num_nodes', type=int,",
"train_time.append(t2 - t1) # validation valid_loss = [] valid_mape = [] valid_rmse =",
"type=str, default='data/sensor_graph/adj_mx.pkl', help='adj data path') parser.add_argument('--adjtype', type=str, default='doubletransition', help='adj type') parser.add_argument('--gcn_bool', action='store_true', help='whether",
"str(args.expid) + \"_best_\" + str(round(his_loss[bestid], 2)) + \".pth\") if __name__ == \"__main__\": t1",
"amape = [] armse = [] for i in range(12): pred = scaler.inverse_transform(yhat[:,",
"'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE:",
"in adj_mx] print(args) if args.randomadj: adjinit = None else: adjinit = supports[0] if",
"parser.add_argument('--nhid', type=int, default=32, help='') parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension') parser.add_argument('--num_nodes', type=int, default=207, help='number",
"t1) # validation valid_loss = [] valid_mape = [] valid_rmse = [] s1",
"seed # torch.manual_seed(args.seed) # np.random.seed(args.seed) # load data device = torch.device(args.device) sensor_ids, sensor_id_to_ind,",
"str(round(his_loss[bestid], 2)) + \".pth\") if __name__ == \"__main__\": t1 = time.time() main() t2",
"parser = argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda:0', help='') parser.add_argument('--data', type=str, default='data/METR-LA', help='data path') parser.add_argument('--adjdata',",
"parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension') parser.add_argument('--num_nodes', type=int, default=207, help='number of nodes') parser.add_argument('--batch_size', type=int,",
"{:.4f}, Test RMSE: {:.4f}' print(log.format(i + 1, metrics[0], metrics[1], metrics[2])) amae.append(metrics[0]) amape.append(metrics[1]) armse.append(metrics[2])",
"dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size) scaler = dataloader['scaler'] supports = [torch.tensor(i).to(device) for",
"default=32, help='') parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension') parser.add_argument('--num_nodes', type=int, default=207, help='number of nodes')",
"armse = [] for i in range(12): pred = scaler.inverse_transform(yhat[:, :, i]) real",
"args.adjtype) dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size) scaler = dataloader['scaler'] supports = [torch.tensor(i).to(device)",
"+ str(i) + \"_\" + str(round(mvalid_loss, 2)) + \".pth\") print(\"Average Training Time: {:.4f}",
"= testx.transpose(1, 3) with torch.no_grad(): preds = engine.model(testx).transpose(1, 3) outputs.append(preds.squeeze()) yhat = torch.cat(outputs,",
"args.seq_length, args.num_nodes, args.nhid, args.dropout, args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj, adjinit) print(\"start training...\")",
"parser.add_argument('--batch_size', type=int, default=64, help='batch size') parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate') parser.add_argument('--dropout', type=float, default=0.3,",
"parser.add_argument('--num_nodes', type=int, default=207, help='number of nodes') parser.add_argument('--batch_size', type=int, default=64, help='batch size') parser.add_argument('--learning_rate', type=float,",
"{:.4f}, Train RMSE: {:.4f}' print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1])) t2 = time.time() train_time.append(t2 -",
"iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3) testy",
"# testing bestid = np.argmin(his_loss) engine.model.load_state_dict( torch.load(args.save + \"_epoch_\" + str(bestid + 1)",
"= 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid",
"t1))) torch.save(engine.model.state_dict(), args.save + \"_epoch_\" + str(i) + \"_\" + str(round(mvalid_loss, 2)) +",
"= [] for i in range(12): pred = scaler.inverse_transform(yhat[:, :, i]) real =",
"train_mape = [] train_rmse = [] t1 = time.time() dataloader['train_loader'].shuffle() for iter, (x,",
"str(round(mvalid_loss, 2)) + \".pth\") print(\"Average Training Time: {:.4f} secs/epoch\".format(np.mean(train_time))) print(\"Average Inference Time: {:.4f}",
"engine.optimizer.param_groups: # g['lr'] = lr train_loss = [] train_mape = [] train_rmse =",
"[] amape = [] armse = [] for i in range(12): pred =",
"default=0.0001, help='weight decay rate') parser.add_argument('--epochs', type=int, default=50, help='') parser.add_argument('--print_every', type=int, default=50, help='') #",
"help='dropout rate') parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay rate') parser.add_argument('--epochs', type=int, default=50, help='') parser.add_argument('--print_every',",
"= testy.transpose(1, 3) metrics = engine.eval(testx, testy[:, 0, :, :]) valid_loss.append(metrics[0]) valid_mape.append(metrics[1]) valid_rmse.append(metrics[2])",
"val_time.append(s2 - s1) mtrain_loss = np.mean(train_loss) mtrain_mape = np.mean(train_mape) mtrain_rmse = np.mean(train_rmse) mvalid_loss",
"np.mean(train_mape) mtrain_rmse = np.mean(train_rmse) mvalid_loss = np.mean(valid_loss) mvalid_mape = np.mean(valid_mape) mvalid_rmse = np.mean(valid_rmse)",
"args = parser.parse_args() def main(): # set seed # torch.manual_seed(args.seed) # np.random.seed(args.seed) #",
"in enumerate(dataloader['test_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3) with torch.no_grad(): preds =",
"+ str(round(mvalid_loss, 2)) + \".pth\") print(\"Average Training Time: {:.4f} secs/epoch\".format(np.mean(train_time))) print(\"Average Inference Time:",
"{:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch' print(log.format(i, mtrain_loss, mtrain_mape,",
":] for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1,",
"train_mape[-1], train_rmse[-1])) t2 = time.time() train_time.append(t2 - t1) # validation valid_loss = []",
"validation valid_loss = [] valid_mape = [] valid_rmse = [] s1 = time.time()",
"data path') parser.add_argument('--adjtype', type=str, default='doubletransition', help='adj type') parser.add_argument('--gcn_bool', action='store_true', help='whether to add graph",
"print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1])) t2 = time.time() train_time.append(t2 - t1) # validation valid_loss",
"valid_mape = [] valid_rmse = [] s1 = time.time() for iter, (x, y)",
"numpy as np import argparse import time import util import matplotlib.pyplot as plt",
"{:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch' print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape,",
"in enumerate(dataloader['val_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3) testy = torch.Tensor(y).to(device) testy",
"Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(np.mean(amae), np.mean(amape), np.mean(armse))) torch.save(engine.model.state_dict(),",
"(s2 - s1))) val_time.append(s2 - s1) mtrain_loss = np.mean(train_loss) mtrain_mape = np.mean(train_mape) mtrain_rmse",
"time import util import matplotlib.pyplot as plt from engine import trainer parser =",
"train_time = [] for i in range(1, args.epochs + 1): # if i",
"random initialize adaptive adj') parser.add_argument('--seq_length', type=int, default=12, help='') parser.add_argument('--nhid', type=int, default=32, help='') parser.add_argument('--in_dim',",
"args.nhid, args.dropout, args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj, adjinit) print(\"start training...\") his_loss =",
"# load data device = torch.device(args.device) sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata, args.adjtype) dataloader",
"adaptive adj') parser.add_argument('--addaptadj', action='store_true', help='whether add adaptive adj') parser.add_argument('--randomadj', action='store_true', help='whether random initialize",
"np.mean(train_rmse) mvalid_loss = np.mean(valid_loss) mvalid_mape = np.mean(valid_mape) mvalid_rmse = np.mean(valid_rmse) his_loss.append(mvalid_loss) log =",
"help='whether only adaptive adj') parser.add_argument('--addaptadj', action='store_true', help='whether add adaptive adj') parser.add_argument('--randomadj', action='store_true', help='whether",
"i] metrics = util.metric(pred, real) log = 'Evaluate best model on test data",
"'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test",
"str(bestid + 1) + \"_\" + str(round(his_loss[bestid], 2)) + \".pth\")) outputs = []",
"torch.Tensor(x).to(device) testx = testx.transpose(1, 3) with torch.no_grad(): preds = engine.model(testx).transpose(1, 3) outputs.append(preds.squeeze()) yhat",
"adaptive adj') parser.add_argument('--seq_length', type=int, default=12, help='') parser.add_argument('--nhid', type=int, default=32, help='') parser.add_argument('--in_dim', type=int, default=2,",
"adjinit = None else: adjinit = supports[0] if args.aptonly: supports = None engine",
"trainy.transpose(1, 3) metrics = engine.train(trainx, trainy[:, 0, :, :]) train_loss.append(metrics[0]) train_mape.append(metrics[1]) train_rmse.append(metrics[2]) if",
":, :]) valid_loss.append(metrics[0]) valid_mape.append(metrics[1]) valid_rmse.append(metrics[2]) s2 = time.time() log = 'Epoch: {:03d}, Inference",
"+ \"_epoch_\" + str(bestid + 1) + \"_\" + str(round(his_loss[bestid], 2)) + \".pth\"))",
"loss on best model is\", str(round(his_loss[bestid], 4))) amae = [] amape = []",
"= realy[:, :, i] metrics = util.metric(pred, real) log = 'Evaluate best model",
"2)) + \".pth\") if __name__ == \"__main__\": t1 = time.time() main() t2 =",
"torch.Tensor(x).to(device) testx = testx.transpose(1, 3) testy = torch.Tensor(y).to(device) testy = testy.transpose(1, 3) metrics",
"pred = scaler.inverse_transform(yhat[:, :, i]) real = realy[:, :, i] metrics = util.metric(pred,",
"== 0: # lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10))) #",
"{:.4f}/epoch' print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1))) torch.save(engine.model.state_dict(), args.save",
"help='') parser.add_argument('--print_every', type=int, default=50, help='') # parser.add_argument('--seed',type=int,default=99,help='random seed') parser.add_argument('--save', type=str, default='./garage/metr', help='save path')",
"{:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f},",
"= parser.parse_args() def main(): # set seed # torch.manual_seed(args.seed) # np.random.seed(args.seed) # load",
"3) with torch.no_grad(): preds = engine.model(testx).transpose(1, 3) outputs.append(preds.squeeze()) yhat = torch.cat(outputs, dim=0) yhat",
"= torch.device(args.device) sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata, args.adjtype) dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size,",
"parser.add_argument('--randomadj', action='store_true', help='whether random initialize adaptive adj') parser.add_argument('--seq_length', type=int, default=12, help='') parser.add_argument('--nhid', type=int,",
"rate') parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate') parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay rate') parser.add_argument('--epochs',",
"help='weight decay rate') parser.add_argument('--epochs', type=int, default=50, help='') parser.add_argument('--print_every', type=int, default=50, help='') # parser.add_argument('--seed',type=int,default=99,help='random",
"type=int, default=50, help='') # parser.add_argument('--seed',type=int,default=99,help='random seed') parser.add_argument('--save', type=str, default='./garage/metr', help='save path') parser.add_argument('--expid', type=int,",
"RMSE: {:.4f}' print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1])) t2 = time.time() train_time.append(t2 - t1) #",
"enumerate(dataloader['train_loader'].get_iterator()): trainx = torch.Tensor(x).to(device) trainx = trainx.transpose(1, 3) trainy = torch.Tensor(y).to(device) trainy =",
"'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss:",
"engine.model.load_state_dict( torch.load(args.save + \"_epoch_\" + str(bestid + 1) + \"_\" + str(round(his_loss[bestid], 2))",
"mtrain_mape = np.mean(train_mape) mtrain_rmse = np.mean(train_rmse) mvalid_loss = np.mean(valid_loss) mvalid_mape = np.mean(valid_mape) mvalid_rmse",
"Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch' print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse,",
"if __name__ == \"__main__\": t1 = time.time() main() t2 = time.time() print(\"Total time",
"torch import numpy as np import argparse import time import util import matplotlib.pyplot",
"default=50, help='') parser.add_argument('--print_every', type=int, default=50, help='') # parser.add_argument('--seed',type=int,default=99,help='random seed') parser.add_argument('--save', type=str, default='./garage/metr', help='save",
"# parser.add_argument('--seed',type=int,default=99,help='random seed') parser.add_argument('--save', type=str, default='./garage/metr', help='save path') parser.add_argument('--expid', type=int, default=1, help='experiment id')",
"= time.time() log = 'Epoch: {:03d}, Inference Time: {:.4f} secs' print(log.format(i, (s2 -",
"\"_best_\" + str(round(his_loss[bestid], 2)) + \".pth\") if __name__ == \"__main__\": t1 = time.time()",
"testy[:, 0, :, :]) valid_loss.append(metrics[0]) valid_mape.append(metrics[1]) valid_rmse.append(metrics[2]) s2 = time.time() log = 'Epoch:",
"\".pth\")) outputs = [] realy = torch.Tensor(dataloader['y_test']).to(device) realy = realy.transpose(1, 3)[:, 0, :,",
"i % 10 == 0: # lr = max(0.000002,args.learning_rate * (0.1 ** (i",
"+ \"_epoch_\" + str(i) + \"_\" + str(round(mvalid_loss, 2)) + \".pth\") print(\"Average Training",
"RMSE: {:.4f}' print(log.format(np.mean(amae), np.mean(amape), np.mean(armse))) torch.save(engine.model.state_dict(), args.save + \"_exp\" + str(args.expid) + \"_best_\"",
"= time.time() train_time.append(t2 - t1) # validation valid_loss = [] valid_mape = []",
"12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(np.mean(amae), np.mean(amape),",
"secs/epoch\".format(np.mean(train_time))) print(\"Average Inference Time: {:.4f} secs\".format(np.mean(val_time))) # testing bestid = np.argmin(his_loss) engine.model.load_state_dict( torch.load(args.save",
"= [] train_mape = [] train_rmse = [] t1 = time.time() dataloader['train_loader'].shuffle() for",
"+ 1): # if i % 10 == 0: # lr = max(0.000002,args.learning_rate",
"+ \"_\" + str(round(his_loss[bestid], 2)) + \".pth\")) outputs = [] realy = torch.Tensor(dataloader['y_test']).to(device)",
"3) trainy = torch.Tensor(y).to(device) trainy = trainy.transpose(1, 3) metrics = engine.train(trainx, trainy[:, 0,",
"over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(np.mean(amae),",
"= torch.Tensor(y).to(device) trainy = trainy.transpose(1, 3) metrics = engine.train(trainx, trainy[:, 0, :, :])",
"RMSE: {:.4f}, Training Time: {:.4f}/epoch' print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2",
"Test RMSE: {:.4f}' print(log.format(np.mean(amae), np.mean(amape), np.mean(armse))) torch.save(engine.model.state_dict(), args.save + \"_exp\" + str(args.expid) +",
"amape.append(metrics[1]) armse.append(metrics[2]) log = 'On average over 12 horizons, Test MAE: {:.4f}, Test",
"= [] s1 = time.time() for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()): testx =",
"trainer parser = argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda:0', help='') parser.add_argument('--data', type=str, default='data/METR-LA', help='data path')",
"time.time() for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1,",
"mvalid_rmse = np.mean(valid_rmse) his_loss.append(mvalid_loss) log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE:",
"is\", str(round(his_loss[bestid], 4))) amae = [] amape = [] armse = [] for",
"MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(i + 1, metrics[0], metrics[1], metrics[2])) amae.append(metrics[0]) amape.append(metrics[1])",
"log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}'",
"args.print_every == 0: log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f},",
"type=str, default='cuda:0', help='') parser.add_argument('--data', type=str, default='data/METR-LA', help='data path') parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl', help='adj data",
"enumerate(dataloader['test_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3) with torch.no_grad(): preds = engine.model(testx).transpose(1,",
"average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'",
"trainy = torch.Tensor(y).to(device) trainy = trainy.transpose(1, 3) metrics = engine.train(trainx, trainy[:, 0, :,",
"torch.device(args.device) sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata, args.adjtype) dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size)",
"if args.randomadj: adjinit = None else: adjinit = supports[0] if args.aptonly: supports =",
"{:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f},",
"rate') parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay rate') parser.add_argument('--epochs', type=int, default=50, help='') parser.add_argument('--print_every', type=int,",
"0: # lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10))) # for",
"10 == 0: # lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10)))",
"help='adj data path') parser.add_argument('--adjtype', type=str, default='doubletransition', help='adj type') parser.add_argument('--gcn_bool', action='store_true', help='whether to add",
"help='whether add adaptive adj') parser.add_argument('--randomadj', action='store_true', help='whether random initialize adaptive adj') parser.add_argument('--seq_length', type=int,",
"type=float, default=0.001, help='learning rate') parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate') parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight",
":, i]) real = realy[:, :, i] metrics = util.metric(pred, real) log =",
"Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch' print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse,",
"(x, y) in enumerate(dataloader['val_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3) testy =",
"= util.metric(pred, real) log = 'Evaluate best model on test data for horizon",
"torch.Tensor(x).to(device) trainx = trainx.transpose(1, 3) trainy = torch.Tensor(y).to(device) trainy = trainy.transpose(1, 3) metrics",
"testy = testy.transpose(1, 3) metrics = engine.eval(testx, testy[:, 0, :, :]) valid_loss.append(metrics[0]) valid_mape.append(metrics[1])",
"metrics[2])) amae.append(metrics[0]) amape.append(metrics[1]) armse.append(metrics[2]) log = 'On average over 12 horizons, Test MAE:",
"= engine.train(trainx, trainy[:, 0, :, :]) train_loss.append(metrics[0]) train_mape.append(metrics[1]) train_rmse.append(metrics[2]) if iter % args.print_every",
"trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout, args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj, adjinit)",
"= np.mean(valid_rmse) his_loss.append(mvalid_loss) log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f},",
"(i // 10))) # for g in engine.optimizer.param_groups: # g['lr'] = lr train_loss",
"valid_rmse = [] s1 = time.time() for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()): testx",
"# np.random.seed(args.seed) # load data device = torch.device(args.device) sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata,",
"np.mean(valid_mape) mvalid_rmse = np.mean(valid_rmse) his_loss.append(mvalid_loss) log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train",
"t1 = time.time() main() t2 = time.time() print(\"Total time spent: {:.4f}\".format(t2 - t1))",
"time.time() log = 'Epoch: {:03d}, Inference Time: {:.4f} secs' print(log.format(i, (s2 - s1)))",
"parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl', help='adj data path') parser.add_argument('--adjtype', type=str, default='doubletransition', help='adj type') parser.add_argument('--gcn_bool', action='store_true',",
"model is\", str(round(his_loss[bestid], 4))) amae = [] amape = [] armse = []",
"[] for i in range(12): pred = scaler.inverse_transform(yhat[:, :, i]) real = realy[:,",
"engine.model(testx).transpose(1, 3) outputs.append(preds.squeeze()) yhat = torch.cat(outputs, dim=0) yhat = yhat[:realy.size(0), ...] print(\"Training finished\")",
"= 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}' print(log.format(iter,",
"path') parser.add_argument('--adjtype', type=str, default='doubletransition', help='adj type') parser.add_argument('--gcn_bool', action='store_true', help='whether to add graph convolution",
"(x, y) in enumerate(dataloader['train_loader'].get_iterator()): trainx = torch.Tensor(x).to(device) trainx = trainx.transpose(1, 3) trainy =",
"3) metrics = engine.eval(testx, testy[:, 0, :, :]) valid_loss.append(metrics[0]) valid_mape.append(metrics[1]) valid_rmse.append(metrics[2]) s2 =",
"y) in enumerate(dataloader['train_loader'].get_iterator()): trainx = torch.Tensor(x).to(device) trainx = trainx.transpose(1, 3) trainy = torch.Tensor(y).to(device)",
"np.mean(valid_rmse) his_loss.append(mvalid_loss) log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train",
"= [] amape = [] armse = [] for i in range(12): pred",
"= torch.Tensor(x).to(device) testx = testx.transpose(1, 3) with torch.no_grad(): preds = engine.model(testx).transpose(1, 3) outputs.append(preds.squeeze())",
"default=207, help='number of nodes') parser.add_argument('--batch_size', type=int, default=64, help='batch size') parser.add_argument('--learning_rate', type=float, default=0.001, help='learning",
"3) testy = torch.Tensor(y).to(device) testy = testy.transpose(1, 3) metrics = engine.eval(testx, testy[:, 0,",
"realy = torch.Tensor(dataloader['y_test']).to(device) realy = realy.transpose(1, 3)[:, 0, :, :] for iter, (x,",
"trainx = trainx.transpose(1, 3) trainy = torch.Tensor(y).to(device) trainy = trainy.transpose(1, 3) metrics =",
"i in range(12): pred = scaler.inverse_transform(yhat[:, :, i]) real = realy[:, :, i]",
"\"_epoch_\" + str(bestid + 1) + \"_\" + str(round(his_loss[bestid], 2)) + \".pth\")) outputs",
"torch.cat(outputs, dim=0) yhat = yhat[:realy.size(0), ...] print(\"Training finished\") print(\"The valid loss on best",
"torch.save(engine.model.state_dict(), args.save + \"_epoch_\" + str(i) + \"_\" + str(round(mvalid_loss, 2)) + \".pth\")",
"args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj, adjinit) print(\"start training...\") his_loss = [] val_time",
"= trainx.transpose(1, 3) trainy = torch.Tensor(y).to(device) trainy = trainy.transpose(1, 3) metrics = engine.train(trainx,",
"...] print(\"Training finished\") print(\"The valid loss on best model is\", str(round(his_loss[bestid], 4))) amae",
"- s1) mtrain_loss = np.mean(train_loss) mtrain_mape = np.mean(train_mape) mtrain_rmse = np.mean(train_rmse) mvalid_loss =",
"Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE:",
"Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training",
"parser.add_argument('--epochs', type=int, default=50, help='') parser.add_argument('--print_every', type=int, default=50, help='') # parser.add_argument('--seed',type=int,default=99,help='random seed') parser.add_argument('--save', type=str,",
"3) outputs.append(preds.squeeze()) yhat = torch.cat(outputs, dim=0) yhat = yhat[:realy.size(0), ...] print(\"Training finished\") print(\"The",
"yhat[:realy.size(0), ...] print(\"Training finished\") print(\"The valid loss on best model is\", str(round(his_loss[bestid], 4)))",
"= [] train_rmse = [] t1 = time.time() dataloader['train_loader'].shuffle() for iter, (x, y)",
"for i in adj_mx] print(args) if args.randomadj: adjinit = None else: adjinit =",
"= argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda:0', help='') parser.add_argument('--data', type=str, default='data/METR-LA', help='data path') parser.add_argument('--adjdata', type=str,",
"sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata, args.adjtype) dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size) scaler =",
"Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch' print(log.format(i, mtrain_loss,",
"Time: {:.4f} secs\".format(np.mean(val_time))) # testing bestid = np.argmin(his_loss) engine.model.load_state_dict( torch.load(args.save + \"_epoch_\" +",
"valid loss on best model is\", str(round(his_loss[bestid], 4))) amae = [] amape =",
"real) log = 'Evaluate best model on test data for horizon {:d}, Test",
"add adaptive adj') parser.add_argument('--randomadj', action='store_true', help='whether random initialize adaptive adj') parser.add_argument('--seq_length', type=int, default=12,",
"lr train_loss = [] train_mape = [] train_rmse = [] t1 = time.time()",
"print(\"Average Inference Time: {:.4f} secs\".format(np.mean(val_time))) # testing bestid = np.argmin(his_loss) engine.model.load_state_dict( torch.load(args.save +",
"valid_loss.append(metrics[0]) valid_mape.append(metrics[1]) valid_rmse.append(metrics[2]) s2 = time.time() log = 'Epoch: {:03d}, Inference Time: {:.4f}",
"train_loss = [] train_mape = [] train_rmse = [] t1 = time.time() dataloader['train_loader'].shuffle()",
"train_loss[-1], train_mape[-1], train_rmse[-1])) t2 = time.time() train_time.append(t2 - t1) # validation valid_loss =",
"initialize adaptive adj') parser.add_argument('--seq_length', type=int, default=12, help='') parser.add_argument('--nhid', type=int, default=32, help='') parser.add_argument('--in_dim', type=int,",
"mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1))) torch.save(engine.model.state_dict(), args.save + \"_epoch_\"",
"{:.4f}, Test RMSE: {:.4f}' print(log.format(np.mean(amae), np.mean(amape), np.mean(armse))) torch.save(engine.model.state_dict(), args.save + \"_exp\" + str(args.expid)",
"metrics[1], metrics[2])) amae.append(metrics[0]) amape.append(metrics[1]) armse.append(metrics[2]) log = 'On average over 12 horizons, Test",
"matplotlib.pyplot as plt from engine import trainer parser = argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda:0',",
"metrics = util.metric(pred, real) log = 'Evaluate best model on test data for",
"4))) amae = [] amape = [] armse = [] for i in",
"= dataloader['scaler'] supports = [torch.tensor(i).to(device) for i in adj_mx] print(args) if args.randomadj: adjinit",
"[] realy = torch.Tensor(dataloader['y_test']).to(device) realy = realy.transpose(1, 3)[:, 0, :, :] for iter,",
"args.save + \"_epoch_\" + str(i) + \"_\" + str(round(mvalid_loss, 2)) + \".pth\") print(\"Average",
"type=int, default=12, help='') parser.add_argument('--nhid', type=int, default=32, help='') parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension') parser.add_argument('--num_nodes',",
"= lr train_loss = [] train_mape = [] train_rmse = [] t1 =",
"% 10 == 0: # lr = max(0.000002,args.learning_rate * (0.1 ** (i //",
"Train MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid",
"torch.save(engine.model.state_dict(), args.save + \"_exp\" + str(args.expid) + \"_best_\" + str(round(his_loss[bestid], 2)) + \".pth\")",
"\".pth\") print(\"Average Training Time: {:.4f} secs/epoch\".format(np.mean(train_time))) print(\"Average Inference Time: {:.4f} secs\".format(np.mean(val_time))) # testing",
"adj') parser.add_argument('--randomadj', action='store_true', help='whether random initialize adaptive adj') parser.add_argument('--seq_length', type=int, default=12, help='') parser.add_argument('--nhid',",
"parser.add_argument('--adjtype', type=str, default='doubletransition', help='adj type') parser.add_argument('--gcn_bool', action='store_true', help='whether to add graph convolution layer')",
"scaler.inverse_transform(yhat[:, :, i]) real = realy[:, :, i] metrics = util.metric(pred, real) log",
"Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}' print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1]))",
"amae.append(metrics[0]) amape.append(metrics[1]) armse.append(metrics[2]) log = 'On average over 12 horizons, Test MAE: {:.4f},",
"= time.time() dataloader['train_loader'].shuffle() for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()): trainx = torch.Tensor(x).to(device) trainx",
"help='adj type') parser.add_argument('--gcn_bool', action='store_true', help='whether to add graph convolution layer') parser.add_argument('--aptonly', action='store_true', help='whether",
"dim=0) yhat = yhat[:realy.size(0), ...] print(\"Training finished\") print(\"The valid loss on best model",
"Training Time: {:.4f} secs/epoch\".format(np.mean(train_time))) print(\"Average Inference Time: {:.4f} secs\".format(np.mean(val_time))) # testing bestid =",
"device, supports, args.gcn_bool, args.addaptadj, adjinit) print(\"start training...\") his_loss = [] val_time = []",
"np.mean(armse))) torch.save(engine.model.state_dict(), args.save + \"_exp\" + str(args.expid) + \"_best_\" + str(round(his_loss[bestid], 2)) +",
"= [] realy = torch.Tensor(dataloader['y_test']).to(device) realy = realy.transpose(1, 3)[:, 0, :, :] for",
"# validation valid_loss = [] valid_mape = [] valid_rmse = [] s1 =",
"{:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f},",
"util import matplotlib.pyplot as plt from engine import trainer parser = argparse.ArgumentParser() parser.add_argument('--device',",
"= supports[0] if args.aptonly: supports = None engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes,",
"rate') parser.add_argument('--epochs', type=int, default=50, help='') parser.add_argument('--print_every', type=int, default=50, help='') # parser.add_argument('--seed',type=int,default=99,help='random seed') parser.add_argument('--save',",
"else: adjinit = supports[0] if args.aptonly: supports = None engine = trainer(scaler, args.in_dim,",
"= [] armse = [] for i in range(12): pred = scaler.inverse_transform(yhat[:, :,",
"range(1, args.epochs + 1): # if i % 10 == 0: # lr",
"args.epochs + 1): # if i % 10 == 0: # lr =",
"0, :, :]) train_loss.append(metrics[0]) train_mape.append(metrics[1]) train_rmse.append(metrics[2]) if iter % args.print_every == 0: log",
"= np.argmin(his_loss) engine.model.load_state_dict( torch.load(args.save + \"_epoch_\" + str(bestid + 1) + \"_\" +",
"iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3) with",
"{:.4f}' print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1])) t2 = time.time() train_time.append(t2 - t1) # validation",
"train_loss.append(metrics[0]) train_mape.append(metrics[1]) train_rmse.append(metrics[2]) if iter % args.print_every == 0: log = 'Iter: {:03d},",
"= util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size) scaler = dataloader['scaler'] supports = [torch.tensor(i).to(device) for i",
"Time: {:.4f} secs/epoch\".format(np.mean(train_time))) print(\"Average Inference Time: {:.4f} secs\".format(np.mean(val_time))) # testing bestid = np.argmin(his_loss)",
"[] for i in range(1, args.epochs + 1): # if i % 10",
"Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch' print(log.format(i,",
"in range(12): pred = scaler.inverse_transform(yhat[:, :, i]) real = realy[:, :, i] metrics",
"{:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(i + 1,",
"= [] for i in range(1, args.epochs + 1): # if i %",
"for i in range(12): pred = scaler.inverse_transform(yhat[:, :, i]) real = realy[:, :,",
"help='data path') parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl', help='adj data path') parser.add_argument('--adjtype', type=str, default='doubletransition', help='adj type')",
"args.addaptadj, adjinit) print(\"start training...\") his_loss = [] val_time = [] train_time = []",
"torch.load(args.save + \"_epoch_\" + str(bestid + 1) + \"_\" + str(round(his_loss[bestid], 2)) +",
"nodes') parser.add_argument('--batch_size', type=int, default=64, help='batch size') parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate') parser.add_argument('--dropout', type=float,",
"[] t1 = time.time() dataloader['train_loader'].shuffle() for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()): trainx =",
"help='save path') parser.add_argument('--expid', type=int, default=1, help='experiment id') args = parser.parse_args() def main(): #",
"np.mean(valid_loss) mvalid_mape = np.mean(valid_mape) mvalid_rmse = np.mean(valid_rmse) his_loss.append(mvalid_loss) log = 'Epoch: {:03d}, Train",
"parser.add_argument('--gcn_bool', action='store_true', help='whether to add graph convolution layer') parser.add_argument('--aptonly', action='store_true', help='whether only adaptive",
"help='') parser.add_argument('--nhid', type=int, default=32, help='') parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension') parser.add_argument('--num_nodes', type=int, default=207,",
"main(): # set seed # torch.manual_seed(args.seed) # np.random.seed(args.seed) # load data device =",
"= yhat[:realy.size(0), ...] print(\"Training finished\") print(\"The valid loss on best model is\", str(round(his_loss[bestid],",
"args.batch_size, args.batch_size) scaler = dataloader['scaler'] supports = [torch.tensor(i).to(device) for i in adj_mx] print(args)",
"testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3) with torch.no_grad(): preds = engine.model(testx).transpose(1, 3)",
"in enumerate(dataloader['train_loader'].get_iterator()): trainx = torch.Tensor(x).to(device) trainx = trainx.transpose(1, 3) trainy = torch.Tensor(y).to(device) trainy",
"for g in engine.optimizer.param_groups: # g['lr'] = lr train_loss = [] train_mape =",
"(t2 - t1))) torch.save(engine.model.state_dict(), args.save + \"_epoch_\" + str(i) + \"_\" + str(round(mvalid_loss,",
"with torch.no_grad(): preds = engine.model(testx).transpose(1, 3) outputs.append(preds.squeeze()) yhat = torch.cat(outputs, dim=0) yhat =",
"import numpy as np import argparse import time import util import matplotlib.pyplot as",
"+ \"_best_\" + str(round(his_loss[bestid], 2)) + \".pth\") if __name__ == \"__main__\": t1 =",
"help='') parser.add_argument('--data', type=str, default='data/METR-LA', help='data path') parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl', help='adj data path') parser.add_argument('--adjtype',",
"best model is\", str(round(his_loss[bestid], 4))) amae = [] amape = [] armse =",
"max(0.000002,args.learning_rate * (0.1 ** (i // 10))) # for g in engine.optimizer.param_groups: #",
"g['lr'] = lr train_loss = [] train_mape = [] train_rmse = [] t1",
"{:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch'",
"if i % 10 == 0: # lr = max(0.000002,args.learning_rate * (0.1 **",
"graph convolution layer') parser.add_argument('--aptonly', action='store_true', help='whether only adaptive adj') parser.add_argument('--addaptadj', action='store_true', help='whether add",
"= [] train_time = [] for i in range(1, args.epochs + 1): #",
"# torch.manual_seed(args.seed) # np.random.seed(args.seed) # load data device = torch.device(args.device) sensor_ids, sensor_id_to_ind, adj_mx",
"default=50, help='') # parser.add_argument('--seed',type=int,default=99,help='random seed') parser.add_argument('--save', type=str, default='./garage/metr', help='save path') parser.add_argument('--expid', type=int, default=1,",
"for i in range(1, args.epochs + 1): # if i % 10 ==",
"secs\".format(np.mean(val_time))) # testing bestid = np.argmin(his_loss) engine.model.load_state_dict( torch.load(args.save + \"_epoch_\" + str(bestid +",
"% args.print_every == 0: log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE:",
"0, :, :]) valid_loss.append(metrics[0]) valid_mape.append(metrics[1]) valid_rmse.append(metrics[2]) s2 = time.time() log = 'Epoch: {:03d},",
"= [torch.tensor(i).to(device) for i in adj_mx] print(args) if args.randomadj: adjinit = None else:",
"armse.append(metrics[2]) log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE:",
"plt from engine import trainer parser = argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda:0', help='') parser.add_argument('--data',",
"args.weight_decay, device, supports, args.gcn_bool, args.addaptadj, adjinit) print(\"start training...\") his_loss = [] val_time =",
"default=2, help='inputs dimension') parser.add_argument('--num_nodes', type=int, default=207, help='number of nodes') parser.add_argument('--batch_size', type=int, default=64, help='batch",
"log = 'Epoch: {:03d}, Inference Time: {:.4f} secs' print(log.format(i, (s2 - s1))) val_time.append(s2",
"testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3) testy = torch.Tensor(y).to(device) testy = testy.transpose(1,",
"# for g in engine.optimizer.param_groups: # g['lr'] = lr train_loss = [] train_mape",
"= np.mean(valid_mape) mvalid_rmse = np.mean(valid_rmse) his_loss.append(mvalid_loss) log = 'Epoch: {:03d}, Train Loss: {:.4f},",
"MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(i + 1, metrics[0], metrics[1],",
"= [] valid_rmse = [] s1 = time.time() for iter, (x, y) in",
"= np.mean(train_mape) mtrain_rmse = np.mean(train_rmse) mvalid_loss = np.mean(valid_loss) mvalid_mape = np.mean(valid_mape) mvalid_rmse =",
"path') parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl', help='adj data path') parser.add_argument('--adjtype', type=str, default='doubletransition', help='adj type') parser.add_argument('--gcn_bool',",
"parser.add_argument('--seed',type=int,default=99,help='random seed') parser.add_argument('--save', type=str, default='./garage/metr', help='save path') parser.add_argument('--expid', type=int, default=1, help='experiment id') args",
"Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(i + 1, metrics[0], metrics[1], metrics[2])) amae.append(metrics[0])",
"print(log.format(i + 1, metrics[0], metrics[1], metrics[2])) amae.append(metrics[0]) amape.append(metrics[1]) armse.append(metrics[2]) log = 'On average",
"{:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}' print(log.format(iter, train_loss[-1], train_mape[-1],",
"trainy = trainy.transpose(1, 3) metrics = engine.train(trainx, trainy[:, 0, :, :]) train_loss.append(metrics[0]) train_mape.append(metrics[1])",
"= 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f},",
"supports[0] if args.aptonly: supports = None engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid,",
"- t1))) torch.save(engine.model.state_dict(), args.save + \"_epoch_\" + str(i) + \"_\" + str(round(mvalid_loss, 2))",
"supports, args.gcn_bool, args.addaptadj, adjinit) print(\"start training...\") his_loss = [] val_time = [] train_time",
"np import argparse import time import util import matplotlib.pyplot as plt from engine",
"decay rate') parser.add_argument('--epochs', type=int, default=50, help='') parser.add_argument('--print_every', type=int, default=50, help='') # parser.add_argument('--seed',type=int,default=99,help='random seed')",
"mvalid_mape = np.mean(valid_mape) mvalid_rmse = np.mean(valid_rmse) his_loss.append(mvalid_loss) log = 'Epoch: {:03d}, Train Loss:",
"= scaler.inverse_transform(yhat[:, :, i]) real = realy[:, :, i] metrics = util.metric(pred, real)",
"bestid = np.argmin(his_loss) engine.model.load_state_dict( torch.load(args.save + \"_epoch_\" + str(bestid + 1) + \"_\"",
"testx = testx.transpose(1, 3) with torch.no_grad(): preds = engine.model(testx).transpose(1, 3) outputs.append(preds.squeeze()) yhat =",
"valid_loss = [] valid_mape = [] valid_rmse = [] s1 = time.time() for",
"+ str(bestid + 1) + \"_\" + str(round(his_loss[bestid], 2)) + \".pth\")) outputs =",
"train_mape.append(metrics[1]) train_rmse.append(metrics[2]) if iter % args.print_every == 0: log = 'Iter: {:03d}, Train",
"print(\"start training...\") his_loss = [] val_time = [] train_time = [] for i",
"\"_\" + str(round(mvalid_loss, 2)) + \".pth\") print(\"Average Training Time: {:.4f} secs/epoch\".format(np.mean(train_time))) print(\"Average Inference",
"np.random.seed(args.seed) # load data device = torch.device(args.device) sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata, args.adjtype)",
"help='number of nodes') parser.add_argument('--batch_size', type=int, default=64, help='batch size') parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate')",
"load data device = torch.device(args.device) sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata, args.adjtype) dataloader =",
"parser.add_argument('--data', type=str, default='data/METR-LA', help='data path') parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl', help='adj data path') parser.add_argument('--adjtype', type=str,",
"sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata, args.adjtype) dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size) scaler",
"// 10))) # for g in engine.optimizer.param_groups: # g['lr'] = lr train_loss =",
"= np.mean(train_rmse) mvalid_loss = np.mean(valid_loss) mvalid_mape = np.mean(valid_mape) mvalid_rmse = np.mean(valid_rmse) his_loss.append(mvalid_loss) log",
"{:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(i + 1, metrics[0], metrics[1], metrics[2]))",
"help='learning rate') parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate') parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay rate')",
"enumerate(dataloader['val_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3) testy = torch.Tensor(y).to(device) testy =",
"help='whether random initialize adaptive adj') parser.add_argument('--seq_length', type=int, default=12, help='') parser.add_argument('--nhid', type=int, default=32, help='')",
"g in engine.optimizer.param_groups: # g['lr'] = lr train_loss = [] train_mape = []",
"type=str, default='./garage/metr', help='save path') parser.add_argument('--expid', type=int, default=1, help='experiment id') args = parser.parse_args() def",
"3) metrics = engine.train(trainx, trainy[:, 0, :, :]) train_loss.append(metrics[0]) train_mape.append(metrics[1]) train_rmse.append(metrics[2]) if iter",
"mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1))) torch.save(engine.model.state_dict(), args.save + \"_epoch_\" + str(i) +",
"horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(i +",
"type=str, default='data/METR-LA', help='data path') parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl', help='adj data path') parser.add_argument('--adjtype', type=str, default='doubletransition',",
"1, metrics[0], metrics[1], metrics[2])) amae.append(metrics[0]) amape.append(metrics[1]) armse.append(metrics[2]) log = 'On average over 12",
"print(log.format(i, (s2 - s1))) val_time.append(s2 - s1) mtrain_loss = np.mean(train_loss) mtrain_mape = np.mean(train_mape)",
"val_time = [] train_time = [] for i in range(1, args.epochs + 1):",
"testx = testx.transpose(1, 3) testy = torch.Tensor(y).to(device) testy = testy.transpose(1, 3) metrics =",
"finished\") print(\"The valid loss on best model is\", str(round(his_loss[bestid], 4))) amae = []",
"+ 1, metrics[0], metrics[1], metrics[2])) amae.append(metrics[0]) amape.append(metrics[1]) armse.append(metrics[2]) log = 'On average over",
"help='experiment id') args = parser.parse_args() def main(): # set seed # torch.manual_seed(args.seed) #",
"device = torch.device(args.device) sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata, args.adjtype) dataloader = util.load_dataset(args.data, args.batch_size,",
"args.aptonly: supports = None engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout, args.learning_rate,",
"RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time:",
"if args.aptonly: supports = None engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout,",
":]) train_loss.append(metrics[0]) train_mape.append(metrics[1]) train_rmse.append(metrics[2]) if iter % args.print_every == 0: log = 'Iter:",
"= torch.Tensor(x).to(device) trainx = trainx.transpose(1, 3) trainy = torch.Tensor(y).to(device) trainy = trainy.transpose(1, 3)",
"1): # if i % 10 == 0: # lr = max(0.000002,args.learning_rate *",
"adaptive adj') parser.add_argument('--randomadj', action='store_true', help='whether random initialize adaptive adj') parser.add_argument('--seq_length', type=int, default=12, help='')",
"i in adj_mx] print(args) if args.randomadj: adjinit = None else: adjinit = supports[0]",
"== \"__main__\": t1 = time.time() main() t2 = time.time() print(\"Total time spent: {:.4f}\".format(t2",
"None engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout, args.learning_rate, args.weight_decay, device, supports,",
"args.num_nodes, args.nhid, args.dropout, args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj, adjinit) print(\"start training...\") his_loss",
"log = 'Evaluate best model on test data for horizon {:d}, Test MAE:",
":, :] for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx =",
"of nodes') parser.add_argument('--batch_size', type=int, default=64, help='batch size') parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate') parser.add_argument('--dropout',",
"= torch.Tensor(x).to(device) testx = testx.transpose(1, 3) testy = torch.Tensor(y).to(device) testy = testy.transpose(1, 3)",
"on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test",
"[] val_time = [] train_time = [] for i in range(1, args.epochs +",
"<reponame>liujiachang/Graph-WaveNet import torch import numpy as np import argparse import time import util",
"Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(np.mean(amae), np.mean(amape), np.mean(armse))) torch.save(engine.model.state_dict(), args.save + \"_exp\"",
"print(\"Training finished\") print(\"The valid loss on best model is\", str(round(his_loss[bestid], 4))) amae =",
"y) in enumerate(dataloader['test_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3) with torch.no_grad(): preds",
"import util import matplotlib.pyplot as plt from engine import trainer parser = argparse.ArgumentParser()",
"dimension') parser.add_argument('--num_nodes', type=int, default=207, help='number of nodes') parser.add_argument('--batch_size', type=int, default=64, help='batch size') parser.add_argument('--learning_rate',",
"layer') parser.add_argument('--aptonly', action='store_true', help='whether only adaptive adj') parser.add_argument('--addaptadj', action='store_true', help='whether add adaptive adj')",
"testy.transpose(1, 3) metrics = engine.eval(testx, testy[:, 0, :, :]) valid_loss.append(metrics[0]) valid_mape.append(metrics[1]) valid_rmse.append(metrics[2]) s2",
"args.dropout, args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj, adjinit) print(\"start training...\") his_loss = []",
"{:.4f}' print(log.format(np.mean(amae), np.mean(amape), np.mean(armse))) torch.save(engine.model.state_dict(), args.save + \"_exp\" + str(args.expid) + \"_best_\" +",
"adjinit = supports[0] if args.aptonly: supports = None engine = trainer(scaler, args.in_dim, args.seq_length,",
"horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(np.mean(amae), np.mean(amape), np.mean(armse)))",
"= util.load_adj(args.adjdata, args.adjtype) dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size) scaler = dataloader['scaler'] supports",
"+ str(round(his_loss[bestid], 2)) + \".pth\") if __name__ == \"__main__\": t1 = time.time() main()",
"i]) real = realy[:, :, i] metrics = util.metric(pred, real) log = 'Evaluate",
"import torch import numpy as np import argparse import time import util import",
"parser.add_argument('--save', type=str, default='./garage/metr', help='save path') parser.add_argument('--expid', type=int, default=1, help='experiment id') args = parser.parse_args()",
"= None engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout, args.learning_rate, args.weight_decay, device,",
"for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()): trainx = torch.Tensor(x).to(device) trainx = trainx.transpose(1, 3)",
"MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(np.mean(amae), np.mean(amape), np.mean(armse))) torch.save(engine.model.state_dict(), args.save + \"_exp\" +",
"real = realy[:, :, i] metrics = util.metric(pred, real) log = 'Evaluate best",
"y) in enumerate(dataloader['val_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3) testy = torch.Tensor(y).to(device)",
"import matplotlib.pyplot as plt from engine import trainer parser = argparse.ArgumentParser() parser.add_argument('--device', type=str,",
"dataloader['scaler'] supports = [torch.tensor(i).to(device) for i in adj_mx] print(args) if args.randomadj: adjinit =",
"adj') parser.add_argument('--addaptadj', action='store_true', help='whether add adaptive adj') parser.add_argument('--randomadj', action='store_true', help='whether random initialize adaptive",
"default=1, help='experiment id') args = parser.parse_args() def main(): # set seed # torch.manual_seed(args.seed)",
"in engine.optimizer.param_groups: # g['lr'] = lr train_loss = [] train_mape = [] train_rmse",
"torch.manual_seed(args.seed) # np.random.seed(args.seed) # load data device = torch.device(args.device) sensor_ids, sensor_id_to_ind, adj_mx =",
"action='store_true', help='whether add adaptive adj') parser.add_argument('--randomadj', action='store_true', help='whether random initialize adaptive adj') parser.add_argument('--seq_length',",
"torch.Tensor(y).to(device) testy = testy.transpose(1, 3) metrics = engine.eval(testx, testy[:, 0, :, :]) valid_loss.append(metrics[0])",
"metrics = engine.eval(testx, testy[:, 0, :, :]) valid_loss.append(metrics[0]) valid_mape.append(metrics[1]) valid_rmse.append(metrics[2]) s2 = time.time()",
"s1) mtrain_loss = np.mean(train_loss) mtrain_mape = np.mean(train_mape) mtrain_rmse = np.mean(train_rmse) mvalid_loss = np.mean(valid_loss)",
"argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda:0', help='') parser.add_argument('--data', type=str, default='data/METR-LA', help='data path') parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl',",
"help='inputs dimension') parser.add_argument('--num_nodes', type=int, default=207, help='number of nodes') parser.add_argument('--batch_size', type=int, default=64, help='batch size')",
"args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout, args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj, adjinit) print(\"start",
"= engine.model(testx).transpose(1, 3) outputs.append(preds.squeeze()) yhat = torch.cat(outputs, dim=0) yhat = yhat[:realy.size(0), ...] print(\"Training",
"help='batch size') parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate') parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate') parser.add_argument('--weight_decay',",
"= [] t1 = time.time() dataloader['train_loader'].shuffle() for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()): trainx",
"valid_mape.append(metrics[1]) valid_rmse.append(metrics[2]) s2 = time.time() log = 'Epoch: {:03d}, Inference Time: {:.4f} secs'",
"action='store_true', help='whether to add graph convolution layer') parser.add_argument('--aptonly', action='store_true', help='whether only adaptive adj')",
":, i] metrics = util.metric(pred, real) log = 'Evaluate best model on test",
"set seed # torch.manual_seed(args.seed) # np.random.seed(args.seed) # load data device = torch.device(args.device) sensor_ids,",
"Time: {:.4f} secs' print(log.format(i, (s2 - s1))) val_time.append(s2 - s1) mtrain_loss = np.mean(train_loss)",
"t1 = time.time() dataloader['train_loader'].shuffle() for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()): trainx = torch.Tensor(x).to(device)",
"type=int, default=207, help='number of nodes') parser.add_argument('--batch_size', type=int, default=64, help='batch size') parser.add_argument('--learning_rate', type=float, default=0.001,",
"{:.4f}' print(log.format(i + 1, metrics[0], metrics[1], metrics[2])) amae.append(metrics[0]) amape.append(metrics[1]) armse.append(metrics[2]) log = 'On",
"+ 1) + \"_\" + str(round(his_loss[bestid], 2)) + \".pth\")) outputs = [] realy",
"= torch.cat(outputs, dim=0) yhat = yhat[:realy.size(0), ...] print(\"Training finished\") print(\"The valid loss on",
"\"_epoch_\" + str(i) + \"_\" + str(round(mvalid_loss, 2)) + \".pth\") print(\"Average Training Time:",
"# set seed # torch.manual_seed(args.seed) # np.random.seed(args.seed) # load data device = torch.device(args.device)",
"parser.add_argument('--print_every', type=int, default=50, help='') # parser.add_argument('--seed',type=int,default=99,help='random seed') parser.add_argument('--save', type=str, default='./garage/metr', help='save path') parser.add_argument('--expid',",
"from engine import trainer parser = argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda:0', help='') parser.add_argument('--data', type=str,",
"data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'",
"trainy[:, 0, :, :]) train_loss.append(metrics[0]) train_mape.append(metrics[1]) train_rmse.append(metrics[2]) if iter % args.print_every == 0:",
"Time: {:.4f}/epoch' print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1))) torch.save(engine.model.state_dict(),",
"{:03d}, Inference Time: {:.4f} secs' print(log.format(i, (s2 - s1))) val_time.append(s2 - s1) mtrain_loss",
"util.load_adj(args.adjdata, args.adjtype) dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size) scaler = dataloader['scaler'] supports =",
"{:.4f}, Training Time: {:.4f}/epoch' print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2 -",
"[] s1 = time.time() for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()): testx = torch.Tensor(x).to(device)",
"str(round(his_loss[bestid], 2)) + \".pth\")) outputs = [] realy = torch.Tensor(dataloader['y_test']).to(device) realy = realy.transpose(1,",
"parser.add_argument('--seq_length', type=int, default=12, help='') parser.add_argument('--nhid', type=int, default=32, help='') parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension')",
"import trainer parser = argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda:0', help='') parser.add_argument('--data', type=str, default='data/METR-LA', help='data",
"amae = [] amape = [] armse = [] for i in range(12):",
"default='data/sensor_graph/adj_mx.pkl', help='adj data path') parser.add_argument('--adjtype', type=str, default='doubletransition', help='adj type') parser.add_argument('--gcn_bool', action='store_true', help='whether to",
"0: log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE:",
"only adaptive adj') parser.add_argument('--addaptadj', action='store_true', help='whether add adaptive adj') parser.add_argument('--randomadj', action='store_true', help='whether random",
"lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10))) # for g in",
"- t1) # validation valid_loss = [] valid_mape = [] valid_rmse = []",
"metrics = engine.train(trainx, trainy[:, 0, :, :]) train_loss.append(metrics[0]) train_mape.append(metrics[1]) train_rmse.append(metrics[2]) if iter %",
"import time import util import matplotlib.pyplot as plt from engine import trainer parser",
"s2 = time.time() log = 'Epoch: {:03d}, Inference Time: {:.4f} secs' print(log.format(i, (s2",
"adj_mx] print(args) if args.randomadj: adjinit = None else: adjinit = supports[0] if args.aptonly:",
"torch.Tensor(y).to(device) trainy = trainy.transpose(1, 3) metrics = engine.train(trainx, trainy[:, 0, :, :]) train_loss.append(metrics[0])",
"util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size) scaler = dataloader['scaler'] supports = [torch.tensor(i).to(device) for i in",
"yhat = torch.cat(outputs, dim=0) yhat = yhat[:realy.size(0), ...] print(\"Training finished\") print(\"The valid loss",
"args.batch_size) scaler = dataloader['scaler'] supports = [torch.tensor(i).to(device) for i in adj_mx] print(args) if",
"3)[:, 0, :, :] for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()): testx = torch.Tensor(x).to(device)",
"default='data/METR-LA', help='data path') parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl', help='adj data path') parser.add_argument('--adjtype', type=str, default='doubletransition', help='adj",
"mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1))) torch.save(engine.model.state_dict(), args.save + \"_epoch_\" + str(i)",
"log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f},",
"iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()): trainx = torch.Tensor(x).to(device) trainx = trainx.transpose(1, 3) trainy",
"= 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test",
"his_loss = [] val_time = [] train_time = [] for i in range(1,",
"+ \".pth\") if __name__ == \"__main__\": t1 = time.time() main() t2 = time.time()",
"engine.eval(testx, testy[:, 0, :, :]) valid_loss.append(metrics[0]) valid_mape.append(metrics[1]) valid_rmse.append(metrics[2]) s2 = time.time() log =",
"= engine.eval(testx, testy[:, 0, :, :]) valid_loss.append(metrics[0]) valid_mape.append(metrics[1]) valid_rmse.append(metrics[2]) s2 = time.time() log",
"seed') parser.add_argument('--save', type=str, default='./garage/metr', help='save path') parser.add_argument('--expid', type=int, default=1, help='experiment id') args =",
"1) + \"_\" + str(round(his_loss[bestid], 2)) + \".pth\")) outputs = [] realy =",
"best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE:",
"to add graph convolution layer') parser.add_argument('--aptonly', action='store_true', help='whether only adaptive adj') parser.add_argument('--addaptadj', action='store_true',",
"path') parser.add_argument('--expid', type=int, default=1, help='experiment id') args = parser.parse_args() def main(): # set",
"= trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout, args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj,",
"engine import trainer parser = argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda:0', help='') parser.add_argument('--data', type=str, default='data/METR-LA',",
"print(\"Average Training Time: {:.4f} secs/epoch\".format(np.mean(train_time))) print(\"Average Inference Time: {:.4f} secs\".format(np.mean(val_time))) # testing bestid",
"\".pth\") if __name__ == \"__main__\": t1 = time.time() main() t2 = time.time() print(\"Total",
"import argparse import time import util import matplotlib.pyplot as plt from engine import",
"type=int, default=32, help='') parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension') parser.add_argument('--num_nodes', type=int, default=207, help='number of",
"parser.add_argument('--addaptadj', action='store_true', help='whether add adaptive adj') parser.add_argument('--randomadj', action='store_true', help='whether random initialize adaptive adj')",
"print(log.format(np.mean(amae), np.mean(amape), np.mean(armse))) torch.save(engine.model.state_dict(), args.save + \"_exp\" + str(args.expid) + \"_best_\" + str(round(his_loss[bestid],",
"realy = realy.transpose(1, 3)[:, 0, :, :] for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):",
"size') parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate') parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate') parser.add_argument('--weight_decay', type=float,",
"[] armse = [] for i in range(12): pred = scaler.inverse_transform(yhat[:, :, i])",
"trainx = torch.Tensor(x).to(device) trainx = trainx.transpose(1, 3) trainy = torch.Tensor(y).to(device) trainy = trainy.transpose(1,",
":]) valid_loss.append(metrics[0]) valid_mape.append(metrics[1]) valid_rmse.append(metrics[2]) s2 = time.time() log = 'Epoch: {:03d}, Inference Time:",
"type=float, default=0.0001, help='weight decay rate') parser.add_argument('--epochs', type=int, default=50, help='') parser.add_argument('--print_every', type=int, default=50, help='')",
"type') parser.add_argument('--gcn_bool', action='store_true', help='whether to add graph convolution layer') parser.add_argument('--aptonly', action='store_true', help='whether only",
"train_rmse = [] t1 = time.time() dataloader['train_loader'].shuffle() for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()):",
"util.metric(pred, real) log = 'Evaluate best model on test data for horizon {:d},",
"= realy.transpose(1, 3)[:, 0, :, :] for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()): testx",
"'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}' print(log.format(iter, train_loss[-1],",
"training...\") his_loss = [] val_time = [] train_time = [] for i in",
"yhat = yhat[:realy.size(0), ...] print(\"Training finished\") print(\"The valid loss on best model is\",",
"2)) + \".pth\") print(\"Average Training Time: {:.4f} secs/epoch\".format(np.mean(train_time))) print(\"Average Inference Time: {:.4f} secs\".format(np.mean(val_time)))",
"{:.4f} secs\".format(np.mean(val_time))) # testing bestid = np.argmin(his_loss) engine.model.load_state_dict( torch.load(args.save + \"_epoch_\" + str(bestid",
"engine.train(trainx, trainy[:, 0, :, :]) train_loss.append(metrics[0]) train_mape.append(metrics[1]) train_rmse.append(metrics[2]) if iter % args.print_every ==",
"parser.add_argument('--device', type=str, default='cuda:0', help='') parser.add_argument('--data', type=str, default='data/METR-LA', help='data path') parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl', help='adj",
"== 0: log = 'Iter: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train",
"np.mean(amape), np.mean(armse))) torch.save(engine.model.state_dict(), args.save + \"_exp\" + str(args.expid) + \"_best_\" + str(round(his_loss[bestid], 2))",
"[] valid_rmse = [] s1 = time.time() for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()):",
"[torch.tensor(i).to(device) for i in adj_mx] print(args) if args.randomadj: adjinit = None else: adjinit",
"default=0.3, help='dropout rate') parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay rate') parser.add_argument('--epochs', type=int, default=50, help='')",
"None else: adjinit = supports[0] if args.aptonly: supports = None engine = trainer(scaler,",
"MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(np.mean(amae), np.mean(amape), np.mean(armse))) torch.save(engine.model.state_dict(), args.save",
"default='./garage/metr', help='save path') parser.add_argument('--expid', type=int, default=1, help='experiment id') args = parser.parse_args() def main():",
"* (0.1 ** (i // 10))) # for g in engine.optimizer.param_groups: # g['lr']",
"args.save + \"_exp\" + str(args.expid) + \"_best_\" + str(round(his_loss[bestid], 2)) + \".pth\") if",
"= torch.Tensor(y).to(device) testy = testy.transpose(1, 3) metrics = engine.eval(testx, testy[:, 0, :, :])",
"preds = engine.model(testx).transpose(1, 3) outputs.append(preds.squeeze()) yhat = torch.cat(outputs, dim=0) yhat = yhat[:realy.size(0), ...]",
"supports = [torch.tensor(i).to(device) for i in adj_mx] print(args) if args.randomadj: adjinit = None",
"0, :, :] for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx",
"data device = torch.device(args.device) sensor_ids, sensor_id_to_ind, adj_mx = util.load_adj(args.adjdata, args.adjtype) dataloader = util.load_dataset(args.data,",
"MAPE: {:.4f}, Train RMSE: {:.4f}' print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1])) t2 = time.time() train_time.append(t2",
"Train MAPE: {:.4f}, Train RMSE: {:.4f}' print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1])) t2 = time.time()",
"Training Time: {:.4f}/epoch' print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss, mvalid_mape, mvalid_rmse, (t2 - t1)))",
"'Epoch: {:03d}, Inference Time: {:.4f} secs' print(log.format(i, (s2 - s1))) val_time.append(s2 - s1)",
"[] valid_mape = [] valid_rmse = [] s1 = time.time() for iter, (x,",
"# lr = max(0.000002,args.learning_rate * (0.1 ** (i // 10))) # for g",
"= testx.transpose(1, 3) testy = torch.Tensor(y).to(device) testy = testy.transpose(1, 3) metrics = engine.eval(testx,",
"log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f},",
"+ str(round(his_loss[bestid], 2)) + \".pth\")) outputs = [] realy = torch.Tensor(dataloader['y_test']).to(device) realy =",
"= None else: adjinit = supports[0] if args.aptonly: supports = None engine =",
"str(round(his_loss[bestid], 4))) amae = [] amape = [] armse = [] for i",
"= time.time() for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx =",
"\"__main__\": t1 = time.time() main() t2 = time.time() print(\"Total time spent: {:.4f}\".format(t2 -",
"dataloader['train_loader'].shuffle() for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()): trainx = torch.Tensor(x).to(device) trainx = trainx.transpose(1,",
"mtrain_rmse = np.mean(train_rmse) mvalid_loss = np.mean(valid_loss) mvalid_mape = np.mean(valid_mape) mvalid_rmse = np.mean(valid_rmse) his_loss.append(mvalid_loss)",
"iter % args.print_every == 0: log = 'Iter: {:03d}, Train Loss: {:.4f}, Train",
"adjinit) print(\"start training...\") his_loss = [] val_time = [] train_time = [] for",
"mvalid_rmse, (t2 - t1))) torch.save(engine.model.state_dict(), args.save + \"_epoch_\" + str(i) + \"_\" +",
"adj') parser.add_argument('--seq_length', type=int, default=12, help='') parser.add_argument('--nhid', type=int, default=32, help='') parser.add_argument('--in_dim', type=int, default=2, help='inputs",
"parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay rate') parser.add_argument('--epochs', type=int, default=50, help='') parser.add_argument('--print_every', type=int, default=50,",
"\"_\" + str(round(his_loss[bestid], 2)) + \".pth\")) outputs = [] realy = torch.Tensor(dataloader['y_test']).to(device) realy",
"model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f},",
"Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE: {:.4f}' print(log.format(iter, train_loss[-1], train_mape[-1], train_rmse[-1])) t2",
"outputs = [] realy = torch.Tensor(dataloader['y_test']).to(device) realy = realy.transpose(1, 3)[:, 0, :, :]",
"type=str, default='doubletransition', help='adj type') parser.add_argument('--gcn_bool', action='store_true', help='whether to add graph convolution layer') parser.add_argument('--aptonly',",
"testx.transpose(1, 3) testy = torch.Tensor(y).to(device) testy = testy.transpose(1, 3) metrics = engine.eval(testx, testy[:,",
"s1 = time.time() for iter, (x, y) in enumerate(dataloader['val_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx",
"= np.mean(valid_loss) mvalid_mape = np.mean(valid_mape) mvalid_rmse = np.mean(valid_rmse) his_loss.append(mvalid_loss) log = 'Epoch: {:03d},",
"np.mean(train_loss) mtrain_mape = np.mean(train_mape) mtrain_rmse = np.mean(train_rmse) mvalid_loss = np.mean(valid_loss) mvalid_mape = np.mean(valid_mape)",
"testx.transpose(1, 3) with torch.no_grad(): preds = engine.model(testx).transpose(1, 3) outputs.append(preds.squeeze()) yhat = torch.cat(outputs, dim=0)",
"action='store_true', help='whether only adaptive adj') parser.add_argument('--addaptadj', action='store_true', help='whether add adaptive adj') parser.add_argument('--randomadj', action='store_true',",
"[] train_mape = [] train_rmse = [] t1 = time.time() dataloader['train_loader'].shuffle() for iter,",
"his_loss.append(mvalid_loss) log = 'Epoch: {:03d}, Train Loss: {:.4f}, Train MAPE: {:.4f}, Train RMSE:",
"adj_mx = util.load_adj(args.adjdata, args.adjtype) dataloader = util.load_dataset(args.data, args.batch_size, args.batch_size, args.batch_size) scaler = dataloader['scaler']",
"time.time() dataloader['train_loader'].shuffle() for iter, (x, y) in enumerate(dataloader['train_loader'].get_iterator()): trainx = torch.Tensor(x).to(device) trainx =",
"{:.4f} secs/epoch\".format(np.mean(train_time))) print(\"Average Inference Time: {:.4f} secs\".format(np.mean(val_time))) # testing bestid = np.argmin(his_loss) engine.model.load_state_dict(",
"np.argmin(his_loss) engine.model.load_state_dict( torch.load(args.save + \"_epoch_\" + str(bestid + 1) + \"_\" + str(round(his_loss[bestid],",
"__name__ == \"__main__\": t1 = time.time() main() t2 = time.time() print(\"Total time spent:",
"for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3)",
"MAPE: {:.4f}, Train RMSE: {:.4f}, Valid Loss: {:.4f}, Valid MAPE: {:.4f}, Valid RMSE:",
"default='cuda:0', help='') parser.add_argument('--data', type=str, default='data/METR-LA', help='data path') parser.add_argument('--adjdata', type=str, default='data/sensor_graph/adj_mx.pkl', help='adj data path')",
"2)) + \".pth\")) outputs = [] realy = torch.Tensor(dataloader['y_test']).to(device) realy = realy.transpose(1, 3)[:,",
"MAPE: {:.4f}, Valid RMSE: {:.4f}, Training Time: {:.4f}/epoch' print(log.format(i, mtrain_loss, mtrain_mape, mtrain_rmse, mvalid_loss,",
"help='') # parser.add_argument('--seed',type=int,default=99,help='random seed') parser.add_argument('--save', type=str, default='./garage/metr', help='save path') parser.add_argument('--expid', type=int, default=1, help='experiment",
"default=64, help='batch size') parser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate') parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate')",
"s1))) val_time.append(s2 - s1) mtrain_loss = np.mean(train_loss) mtrain_mape = np.mean(train_mape) mtrain_rmse = np.mean(train_rmse)",
"range(12): pred = scaler.inverse_transform(yhat[:, :, i]) real = realy[:, :, i] metrics =",
"realy.transpose(1, 3)[:, 0, :, :] for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()): testx =",
"print(\"The valid loss on best model is\", str(round(his_loss[bestid], 4))) amae = [] amape",
"(0.1 ** (i // 10))) # for g in engine.optimizer.param_groups: # g['lr'] =",
"= max(0.000002,args.learning_rate * (0.1 ** (i // 10))) # for g in engine.optimizer.param_groups:",
"train_rmse.append(metrics[2]) if iter % args.print_every == 0: log = 'Iter: {:03d}, Train Loss:",
"args.randomadj: adjinit = None else: adjinit = supports[0] if args.aptonly: supports = None",
"default='doubletransition', help='adj type') parser.add_argument('--gcn_bool', action='store_true', help='whether to add graph convolution layer') parser.add_argument('--aptonly', action='store_true',",
"print(args) if args.randomadj: adjinit = None else: adjinit = supports[0] if args.aptonly: supports",
"action='store_true', help='whether random initialize adaptive adj') parser.add_argument('--seq_length', type=int, default=12, help='') parser.add_argument('--nhid', type=int, default=32,",
"engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout, args.learning_rate, args.weight_decay, device, supports, args.gcn_bool,",
"id') args = parser.parse_args() def main(): # set seed # torch.manual_seed(args.seed) # np.random.seed(args.seed)",
"args.gcn_bool, args.addaptadj, adjinit) print(\"start training...\") his_loss = [] val_time = [] train_time =",
"type=int, default=1, help='experiment id') args = parser.parse_args() def main(): # set seed #",
"on best model is\", str(round(his_loss[bestid], 4))) amae = [] amape = [] armse",
"parser.add_argument('--aptonly', action='store_true', help='whether only adaptive adj') parser.add_argument('--addaptadj', action='store_true', help='whether add adaptive adj') parser.add_argument('--randomadj',",
"+ \".pth\") print(\"Average Training Time: {:.4f} secs/epoch\".format(np.mean(train_time))) print(\"Average Inference Time: {:.4f} secs\".format(np.mean(val_time))) #",
"Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}' print(log.format(i + 1, metrics[0],",
"realy[:, :, i] metrics = util.metric(pred, real) log = 'Evaluate best model on",
"RMSE: {:.4f}' print(log.format(i + 1, metrics[0], metrics[1], metrics[2])) amae.append(metrics[0]) amape.append(metrics[1]) armse.append(metrics[2]) log =",
"i in range(1, args.epochs + 1): # if i % 10 == 0:",
"parser.add_argument('--dropout', type=float, default=0.3, help='dropout rate') parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay rate') parser.add_argument('--epochs', type=int,",
"type=float, default=0.3, help='dropout rate') parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay rate') parser.add_argument('--epochs', type=int, default=50,",
"t2 = time.time() train_time.append(t2 - t1) # validation valid_loss = [] valid_mape =",
"help='') parser.add_argument('--in_dim', type=int, default=2, help='inputs dimension') parser.add_argument('--num_nodes', type=int, default=207, help='number of nodes') parser.add_argument('--batch_size',",
"str(i) + \"_\" + str(round(mvalid_loss, 2)) + \".pth\") print(\"Average Training Time: {:.4f} secs/epoch\".format(np.mean(train_time)))",
"parser.parse_args() def main(): # set seed # torch.manual_seed(args.seed) # np.random.seed(args.seed) # load data",
"(x, y) in enumerate(dataloader['test_loader'].get_iterator()): testx = torch.Tensor(x).to(device) testx = testx.transpose(1, 3) with torch.no_grad():",
"= [] valid_mape = [] valid_rmse = [] s1 = time.time() for iter,",
"argparse import time import util import matplotlib.pyplot as plt from engine import trainer",
"mvalid_mape, mvalid_rmse, (t2 - t1))) torch.save(engine.model.state_dict(), args.save + \"_epoch_\" + str(i) + \"_\"",
"Test RMSE: {:.4f}' print(log.format(i + 1, metrics[0], metrics[1], metrics[2])) amae.append(metrics[0]) amape.append(metrics[1]) armse.append(metrics[2]) log",
"= torch.Tensor(dataloader['y_test']).to(device) realy = realy.transpose(1, 3)[:, 0, :, :] for iter, (x, y)",
"metrics[0], metrics[1], metrics[2])) amae.append(metrics[0]) amape.append(metrics[1]) armse.append(metrics[2]) log = 'On average over 12 horizons,"
] |
[
"typeID - position (relative to tank origin) - size (fixed for most shapes)",
"- position (relative to tank origin) - size (fixed for most shapes) -",
"0 position = (0,0) size = (0,0) anchor = (0,0) layer = 0",
"= size self.layer = layer if anchor: self.anchor = anchor else: self.anchor =",
"the anchor rectangle relative to the tank origin (i.e. NOT relative to the",
"type = 0 position = (0,0) size = (0,0) anchor = (0,0) layer",
"(0,0) + size def absanchor(self): \"\"\" Returns the anchor rectangle relative to the",
"Base tank shape -- all other shapes derive from this Minimal requirements: -",
"= 0 def __init__(self, type, position, size, layer=0, anchor=None): # TODO: Don't forget",
"layer if anchor: self.anchor = anchor else: self.anchor = (0,0) + size def",
"for most shapes) - anchor rectangle (what portion of the shape must be",
"self.position = position self.size = size self.layer = layer if anchor: self.anchor =",
"their entire extent as their anchor rectangle) \"\"\" class base_shape(object): type = 0",
"anchor = (0,0) layer = 0 def __init__(self, type, position, size, layer=0, anchor=None):",
"__init__(self, type, position, size, layer=0, anchor=None): # TODO: Don't forget to do some",
"= anchor else: self.anchor = (0,0) + size def absanchor(self): \"\"\" Returns the",
"= (0,0) anchor = (0,0) layer = 0 def __init__(self, type, position, size,",
"anchor rectangle) \"\"\" class base_shape(object): type = 0 position = (0,0) size =",
"\"\"\" class base_shape(object): type = 0 position = (0,0) size = (0,0) anchor",
"# TODO: Don't forget to do some kind of validation! self.type = type",
"shapes) - anchor rectangle (what portion of the shape must be supported by",
"entire extent as their anchor rectangle) \"\"\" class base_shape(object): type = 0 position",
"type self.position = position self.size = size self.layer = layer if anchor: self.anchor",
"- typeID - position (relative to tank origin) - size (fixed for most",
"most shapes) - anchor rectangle (what portion of the shape must be supported",
"some kind of validation! self.type = type self.position = position self.size = size",
"other shapes derive from this Minimal requirements: - typeID - position (relative to",
"return self.position + (self.position[0]+self.anchor[2], self.position[1]+self.anchor[3]) def __repr__(self): return '<shape(type={self.type})>'.format(self=self) def __str__(self): return '{0}(type={self.type},position={self.position},size={self.size},layer={self.layer},anchor={self.anchor})'.format(",
"this Minimal requirements: - typeID - position (relative to tank origin) - size",
"= 0 position = (0,0) size = (0,0) anchor = (0,0) layer =",
"self.anchor = anchor else: self.anchor = (0,0) + size def absanchor(self): \"\"\" Returns",
"the tank origin (i.e. NOT relative to the shape) \"\"\" return self.position +",
"self.layer = layer if anchor: self.anchor = anchor else: self.anchor = (0,0) +",
"shape must be supported by the lower layer; most shapes will define their",
"anchor=None): # TODO: Don't forget to do some kind of validation! self.type =",
"layer; most shapes will define their entire extent as their anchor rectangle) \"\"\"",
"size def absanchor(self): \"\"\" Returns the anchor rectangle relative to the tank origin",
"rectangle) \"\"\" class base_shape(object): type = 0 position = (0,0) size = (0,0)",
"rectangle relative to the tank origin (i.e. NOT relative to the shape) \"\"\"",
"def __init__(self, type, position, size, layer=0, anchor=None): # TODO: Don't forget to do",
"0 def __init__(self, type, position, size, layer=0, anchor=None): # TODO: Don't forget to",
"kind of validation! self.type = type self.position = position self.size = size self.layer",
"requirements: - typeID - position (relative to tank origin) - size (fixed for",
"to tank origin) - size (fixed for most shapes) - layer (fixed for",
"(what portion of the shape must be supported by the lower layer; most",
"relative to the tank origin (i.e. NOT relative to the shape) \"\"\" return",
"will define their entire extent as their anchor rectangle) \"\"\" class base_shape(object): type",
"-- all other shapes derive from this Minimal requirements: - typeID - position",
"layer=0, anchor=None): # TODO: Don't forget to do some kind of validation! self.type",
"origin (i.e. NOT relative to the shape) \"\"\" return self.position + (self.position[0]+self.anchor[2], self.position[1]+self.anchor[3])",
"tank origin (i.e. NOT relative to the shape) \"\"\" return self.position + (self.position[0]+self.anchor[2],",
"tank origin) - size (fixed for most shapes) - layer (fixed for most",
"most shapes) - layer (fixed for most shapes) - anchor rectangle (what portion",
"- size (fixed for most shapes) - layer (fixed for most shapes) -",
"position = (0,0) size = (0,0) anchor = (0,0) layer = 0 def",
"size (fixed for most shapes) - layer (fixed for most shapes) - anchor",
"as their anchor rectangle) \"\"\" class base_shape(object): type = 0 position = (0,0)",
"position, size, layer=0, anchor=None): # TODO: Don't forget to do some kind of",
"to the shape) \"\"\" return self.position + (self.position[0]+self.anchor[2], self.position[1]+self.anchor[3]) def __repr__(self): return '<shape(type={self.type})>'.format(self=self)",
"- layer (fixed for most shapes) - anchor rectangle (what portion of the",
"anchor rectangle relative to the tank origin (i.e. NOT relative to the shape)",
"validation! self.type = type self.position = position self.size = size self.layer = layer",
"shape) \"\"\" return self.position + (self.position[0]+self.anchor[2], self.position[1]+self.anchor[3]) def __repr__(self): return '<shape(type={self.type})>'.format(self=self) def __str__(self):",
"+ size def absanchor(self): \"\"\" Returns the anchor rectangle relative to the tank",
"size, layer=0, anchor=None): # TODO: Don't forget to do some kind of validation!",
"(i.e. NOT relative to the shape) \"\"\" return self.position + (self.position[0]+self.anchor[2], self.position[1]+self.anchor[3]) def",
"anchor: self.anchor = anchor else: self.anchor = (0,0) + size def absanchor(self): \"\"\"",
"supported by the lower layer; most shapes will define their entire extent as",
"<filename>modules/tankshapes/base.py \"\"\" Base tank shape -- all other shapes derive from this Minimal",
"\"\"\" Returns the anchor rectangle relative to the tank origin (i.e. NOT relative",
"to the tank origin (i.e. NOT relative to the shape) \"\"\" return self.position",
"= (0,0) layer = 0 def __init__(self, type, position, size, layer=0, anchor=None): #",
"\"\"\" Base tank shape -- all other shapes derive from this Minimal requirements:",
"size self.layer = layer if anchor: self.anchor = anchor else: self.anchor = (0,0)",
"(0,0) size = (0,0) anchor = (0,0) layer = 0 def __init__(self, type,",
"shape -- all other shapes derive from this Minimal requirements: - typeID -",
"Minimal requirements: - typeID - position (relative to tank origin) - size (fixed",
"forget to do some kind of validation! self.type = type self.position = position",
"layer (fixed for most shapes) - anchor rectangle (what portion of the shape",
"Returns the anchor rectangle relative to the tank origin (i.e. NOT relative to",
"do some kind of validation! self.type = type self.position = position self.size =",
"all other shapes derive from this Minimal requirements: - typeID - position (relative",
"must be supported by the lower layer; most shapes will define their entire",
"(0,0) anchor = (0,0) layer = 0 def __init__(self, type, position, size, layer=0,",
"position self.size = size self.layer = layer if anchor: self.anchor = anchor else:",
"from this Minimal requirements: - typeID - position (relative to tank origin) -",
"base_shape(object): type = 0 position = (0,0) size = (0,0) anchor = (0,0)",
"+ (self.position[0]+self.anchor[2], self.position[1]+self.anchor[3]) def __repr__(self): return '<shape(type={self.type})>'.format(self=self) def __str__(self): return '{0}(type={self.type},position={self.position},size={self.size},layer={self.layer},anchor={self.anchor})'.format( type(self), self=self)",
"else: self.anchor = (0,0) + size def absanchor(self): \"\"\" Returns the anchor rectangle",
"= (0,0) + size def absanchor(self): \"\"\" Returns the anchor rectangle relative to",
"for most shapes) - layer (fixed for most shapes) - anchor rectangle (what",
"type, position, size, layer=0, anchor=None): # TODO: Don't forget to do some kind",
"= (0,0) size = (0,0) anchor = (0,0) layer = 0 def __init__(self,",
"TODO: Don't forget to do some kind of validation! self.type = type self.position",
"most shapes will define their entire extent as their anchor rectangle) \"\"\" class",
"origin) - size (fixed for most shapes) - layer (fixed for most shapes)",
"shapes) - layer (fixed for most shapes) - anchor rectangle (what portion of",
"define their entire extent as their anchor rectangle) \"\"\" class base_shape(object): type =",
"class base_shape(object): type = 0 position = (0,0) size = (0,0) anchor =",
"= layer if anchor: self.anchor = anchor else: self.anchor = (0,0) + size",
"extent as their anchor rectangle) \"\"\" class base_shape(object): type = 0 position =",
"lower layer; most shapes will define their entire extent as their anchor rectangle)",
"derive from this Minimal requirements: - typeID - position (relative to tank origin)",
"self.position + (self.position[0]+self.anchor[2], self.position[1]+self.anchor[3]) def __repr__(self): return '<shape(type={self.type})>'.format(self=self) def __str__(self): return '{0}(type={self.type},position={self.position},size={self.size},layer={self.layer},anchor={self.anchor})'.format( type(self),",
"self.anchor = (0,0) + size def absanchor(self): \"\"\" Returns the anchor rectangle relative",
"(fixed for most shapes) - layer (fixed for most shapes) - anchor rectangle",
"of validation! self.type = type self.position = position self.size = size self.layer =",
"(relative to tank origin) - size (fixed for most shapes) - layer (fixed",
"the shape) \"\"\" return self.position + (self.position[0]+self.anchor[2], self.position[1]+self.anchor[3]) def __repr__(self): return '<shape(type={self.type})>'.format(self=self) def",
"rectangle (what portion of the shape must be supported by the lower layer;",
"by the lower layer; most shapes will define their entire extent as their",
"size = (0,0) anchor = (0,0) layer = 0 def __init__(self, type, position,",
"self.type = type self.position = position self.size = size self.layer = layer if",
"shapes derive from this Minimal requirements: - typeID - position (relative to tank",
"def absanchor(self): \"\"\" Returns the anchor rectangle relative to the tank origin (i.e.",
"shapes will define their entire extent as their anchor rectangle) \"\"\" class base_shape(object):",
"(fixed for most shapes) - anchor rectangle (what portion of the shape must",
"layer = 0 def __init__(self, type, position, size, layer=0, anchor=None): # TODO: Don't",
"self.size = size self.layer = layer if anchor: self.anchor = anchor else: self.anchor",
"= type self.position = position self.size = size self.layer = layer if anchor:",
"= position self.size = size self.layer = layer if anchor: self.anchor = anchor",
"NOT relative to the shape) \"\"\" return self.position + (self.position[0]+self.anchor[2], self.position[1]+self.anchor[3]) def __repr__(self):",
"the lower layer; most shapes will define their entire extent as their anchor",
"Don't forget to do some kind of validation! self.type = type self.position =",
"\"\"\" return self.position + (self.position[0]+self.anchor[2], self.position[1]+self.anchor[3]) def __repr__(self): return '<shape(type={self.type})>'.format(self=self) def __str__(self): return",
"anchor else: self.anchor = (0,0) + size def absanchor(self): \"\"\" Returns the anchor",
"portion of the shape must be supported by the lower layer; most shapes",
"position (relative to tank origin) - size (fixed for most shapes) - layer",
"relative to the shape) \"\"\" return self.position + (self.position[0]+self.anchor[2], self.position[1]+self.anchor[3]) def __repr__(self): return",
"absanchor(self): \"\"\" Returns the anchor rectangle relative to the tank origin (i.e. NOT",
"- anchor rectangle (what portion of the shape must be supported by the",
"if anchor: self.anchor = anchor else: self.anchor = (0,0) + size def absanchor(self):",
"anchor rectangle (what portion of the shape must be supported by the lower",
"to do some kind of validation! self.type = type self.position = position self.size",
"be supported by the lower layer; most shapes will define their entire extent",
"tank shape -- all other shapes derive from this Minimal requirements: - typeID",
"(0,0) layer = 0 def __init__(self, type, position, size, layer=0, anchor=None): # TODO:",
"the shape must be supported by the lower layer; most shapes will define",
"their anchor rectangle) \"\"\" class base_shape(object): type = 0 position = (0,0) size",
"of the shape must be supported by the lower layer; most shapes will"
] |
[
"('url', models.URLField(unique=True)), ('title', models.CharField(max_length=100)), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ], options={ 'ordering': ['-date'], }, ),",
"[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Planet', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,",
"auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100)), ('slug', models.SlugField(unique_for_date=b'date')), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ('body', markupfield.fields.MarkupField(rendered_field=True)), ('body_markup_type', models.CharField(default=b'markdown',",
"('body', markupfield.fields.MarkupField(rendered_field=True)), ('body_markup_type', models.CharField(default=b'markdown', max_length=30, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'),",
"settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Planet',",
"'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('_body_rendered', models.TextField(editable=False)), ('author', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ['-date'],",
"serialize=False, auto_created=True, primary_key=True)), ('url', models.URLField(unique=True)), ('title', models.CharField(max_length=100)), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ], options={ 'ordering':",
"(b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('_body_rendered', models.TextField(editable=False)), ('author', models.ForeignKey(editable=False,",
"(b'restructuredtext', 'Restructured Text')])), ('_body_rendered', models.TextField(editable=False)), ('author', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ['-date'], },",
"['-date'], }, ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100)),",
"models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ], options={ 'ordering': ['-date'], }, ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(verbose_name='ID',",
"models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100)), ('slug', models.SlugField(unique_for_date=b'date')), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ('body', markupfield.fields.MarkupField(rendered_field=True)),",
"options={ 'ordering': ['-date'], }, ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),",
"serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100)), ('slug', models.SlugField(unique_for_date=b'date')), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ('body', markupfield.fields.MarkupField(rendered_field=True)), ('body_markup_type',",
"markupfield.fields.MarkupField(rendered_field=True)), ('body_markup_type', models.CharField(default=b'markdown', max_length=30, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext',",
"markupfield.fields import django.utils.timezone from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),",
"] operations = [ migrations.CreateModel( name='Planet', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('url',",
"}, ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100)), ('slug',",
"import django.utils.timezone from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]",
"= [ migrations.CreateModel( name='Planet', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('url', models.URLField(unique=True)), ('title',",
"-*- from __future__ import unicode_literals from django.db import models, migrations import markupfield.fields import",
"primary_key=True)), ('url', models.URLField(unique=True)), ('title', models.CharField(max_length=100)), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ], options={ 'ordering': ['-date'], },",
"('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ('body', markupfield.fields.MarkupField(rendered_field=True)), ('body_markup_type', models.CharField(default=b'markdown', max_length=30, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain',",
"db_index=True)), ], options={ 'ordering': ['-date'], }, ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,",
"import markupfield.fields import django.utils.timezone from django.conf import settings class Migration(migrations.Migration): dependencies = [",
"-*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations",
"], options={ 'ordering': ['-date'], }, ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,",
"from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =",
"models.CharField(max_length=100)), ('slug', models.SlugField(unique_for_date=b'date')), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ('body', markupfield.fields.MarkupField(rendered_field=True)), ('body_markup_type', models.CharField(default=b'markdown', max_length=30, choices=[(b'', b'--'),",
"models.CharField(max_length=100)), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ], options={ 'ordering': ['-date'], }, ), migrations.CreateModel( name='Post', fields=[",
"models.CharField(default=b'markdown', max_length=30, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])),",
"('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('url', models.URLField(unique=True)), ('title', models.CharField(max_length=100)), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ],",
"models.SlugField(unique_for_date=b'date')), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ('body', markupfield.fields.MarkupField(rendered_field=True)), ('body_markup_type', models.CharField(default=b'markdown', max_length=30, choices=[(b'', b'--'), (b'html', 'HTML'),",
"'ordering': ['-date'], }, ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title',",
"import unicode_literals from django.db import models, migrations import markupfield.fields import django.utils.timezone from django.conf",
"primary_key=True)), ('title', models.CharField(max_length=100)), ('slug', models.SlugField(unique_for_date=b'date')), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ('body', markupfield.fields.MarkupField(rendered_field=True)), ('body_markup_type', models.CharField(default=b'markdown', max_length=30,",
"dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Planet', fields=[ ('id', models.AutoField(verbose_name='ID',",
"utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import markupfield.fields",
"name='Post', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100)), ('slug', models.SlugField(unique_for_date=b'date')), ('date', models.DateTimeField(default=django.utils.timezone.now,",
"= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Planet', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,",
"django.utils.timezone from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations",
"max_length=30, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('_body_rendered',",
"b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('_body_rendered', models.TextField(editable=False)), ('author',",
"'Restructured Text')])), ('_body_rendered', models.TextField(editable=False)), ('author', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ['-date'], }, ),",
"auto_created=True, primary_key=True)), ('url', models.URLField(unique=True)), ('title', models.CharField(max_length=100)), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ], options={ 'ordering': ['-date'],",
"# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models,",
"django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [",
"(b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('_body_rendered', models.TextField(editable=False)), ('author', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL)), ],",
"class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Planet', fields=[",
"'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('_body_rendered', models.TextField(editable=False)), ('author', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL)),",
"('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ], options={ 'ordering': ['-date'], }, ), migrations.CreateModel( name='Post', fields=[ ('id',",
"migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Planet', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),",
"name='Planet', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('url', models.URLField(unique=True)), ('title', models.CharField(max_length=100)), ('date', models.DateTimeField(default=django.utils.timezone.now,",
"fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('url', models.URLField(unique=True)), ('title', models.CharField(max_length=100)), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),",
"models, migrations import markupfield.fields import django.utils.timezone from django.conf import settings class Migration(migrations.Migration): dependencies",
"), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100)), ('slug', models.SlugField(unique_for_date=b'date')),",
"migrations.CreateModel( name='Planet', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('url', models.URLField(unique=True)), ('title', models.CharField(max_length=100)), ('date',",
"('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100)), ('slug', models.SlugField(unique_for_date=b'date')), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ('body',",
"unicode_literals from django.db import models, migrations import markupfield.fields import django.utils.timezone from django.conf import",
"operations = [ migrations.CreateModel( name='Planet', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('url', models.URLField(unique=True)),",
"fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100)), ('slug', models.SlugField(unique_for_date=b'date')), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),",
"import models, migrations import markupfield.fields import django.utils.timezone from django.conf import settings class Migration(migrations.Migration):",
"from __future__ import unicode_literals from django.db import models, migrations import markupfield.fields import django.utils.timezone",
"django.db import models, migrations import markupfield.fields import django.utils.timezone from django.conf import settings class",
"models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('url', models.URLField(unique=True)), ('title', models.CharField(max_length=100)), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ], options={",
"Text')])), ('_body_rendered', models.TextField(editable=False)), ('author', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ['-date'], }, ), ]",
"('body_markup_type', models.CharField(default=b'markdown', max_length=30, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured",
"from django.db import models, migrations import markupfield.fields import django.utils.timezone from django.conf import settings",
"migrations import markupfield.fields import django.utils.timezone from django.conf import settings class Migration(migrations.Migration): dependencies =",
"models.URLField(unique=True)), ('title', models.CharField(max_length=100)), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ], options={ 'ordering': ['-date'], }, ), migrations.CreateModel(",
"models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ('body', markupfield.fields.MarkupField(rendered_field=True)), ('body_markup_type', models.CharField(default=b'markdown', max_length=30, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'),",
"__future__ import unicode_literals from django.db import models, migrations import markupfield.fields import django.utils.timezone from",
"coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import",
"import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(",
"('title', models.CharField(max_length=100)), ('slug', models.SlugField(unique_for_date=b'date')), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ('body', markupfield.fields.MarkupField(rendered_field=True)), ('body_markup_type', models.CharField(default=b'markdown', max_length=30, choices=[(b'',",
"choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('_body_rendered', models.TextField(editable=False)),",
"[ migrations.CreateModel( name='Planet', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('url', models.URLField(unique=True)), ('title', models.CharField(max_length=100)),",
"'Plain'), (b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('_body_rendered', models.TextField(editable=False)), ('author', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL)), ], options={",
"Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Planet', fields=[ ('id',",
"(b'markdown', 'Markdown'), (b'restructuredtext', 'Restructured Text')])), ('_body_rendered', models.TextField(editable=False)), ('author', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL)), ], options={ 'ordering':",
"('slug', models.SlugField(unique_for_date=b'date')), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ('body', markupfield.fields.MarkupField(rendered_field=True)), ('body_markup_type', models.CharField(default=b'markdown', max_length=30, choices=[(b'', b'--'), (b'html',",
"db_index=True)), ('body', markupfield.fields.MarkupField(rendered_field=True)), ('body_markup_type', models.CharField(default=b'markdown', max_length=30, choices=[(b'', b'--'), (b'html', 'HTML'), (b'plain', 'Plain'), (b'markdown',",
"migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=100)), ('slug', models.SlugField(unique_for_date=b'date')), ('date',",
"('title', models.CharField(max_length=100)), ('date', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ], options={ 'ordering': ['-date'], }, ), migrations.CreateModel( name='Post',"
] |
[
": borrowerKey } result = self.collection.replace_one(filt,loan) # send out pub/sub notifications if result",
"monthly payment P == principal J == effective rate N == total number",
"doc['amountDue'] doc['status'] = 'received' # save var today from time import gmtime, strftime",
"# generate loanKey loan = self.generateLoanKey(loan) # convert values to NumberDecimal loan.convertDecimalToBson() #",
"loan @return string loanKey \"\"\" def generateLoanKey(self, loan) : from time import gmtime,",
"= annualRate / 100 / 12; monthly_payment = principal * ( effective_rate /",
"of simulated proposals @param float principal @param int numPayments @param list lenders :",
"config.config import Config from pymongo.cursor import CursorType from decimal import Decimal from bson.decimal128",
"/ 1000 # add a new proposal doc = self.generateProposal(principal, numPayments, annualRate, currency,",
"'received' # save var today from time import gmtime, strftime now = strftime('%Y-%m-%d',",
"}) return proposals \"\"\" Generates loan key @param biglittle.entity.loan.Loan loan @return string loanKey",
"today from time import gmtime, strftime now = strftime('%Y-%m-%d', gmtime()) doc['recvdate'] = now",
"loanInfo = { 'principal' : principal, 'numPayments' : numPayments, 'annualRate' : annualRate, 'effectiveRate'",
"'lenderBusiness' : lenderBusiness, 'overpayment' : 0.00, 'loanInfo' : loanInfo, 'payments' : [] }",
"@param float principal @param int numPayments @param list lenders : list of lender",
"# init vars config = Config() utils = Utils() result = False loanInfo",
"** -numPayments )) # create LoanInfo and Loan documents loanInfo = { 'principal'",
"payments @param float principal @param int numPayments @param float annualRate @param string currency",
"= 0.00 # convert amount paid to decimal.Decimal for processing purposes if not",
"int numPayments @param list lenders : list of lender keys @return dict \"\"\"",
"biglittle.entity.loan.Loan loan @return bool True if payment processed OK | False otherwise \"\"\"",
"processPayment(self, borrowerKey, amtPaid, loan) : # init vars config = Config() utils =",
"@return bool True if payment processed OK | False otherwise \"\"\" def processPayment(self,",
"if doc['amountPaid'] == 0 : # if underpayment, add to \"overpayment\" but do",
"bson.decimal128 import Decimal128 from utils.utils import Utils from biglittle.domain.base import Base from biglittle.entity.loan",
"rate and monthly payment effective_rate = annualRate / 100 / 12; monthly_payment =",
"# convert values to Decimal loan.convertBsonToDecimal() # have publisher notify subscribers self.publisher.trigger(self.publisher.EVENT_LOAN_UPDATE_BORROWER, {'loan':loan,",
"@param string lenderBusiness @return dict \"\"\" def generateProposal(self, principal, numPayments, annualRate, currency, borrowerKey,",
"pymongo import decimal from config.config import Config from pymongo.cursor import CursorType from decimal",
"documents loanInfo = { 'principal' : principal, 'numPayments' : numPayments, 'annualRate' : annualRate,",
"loan.convertBsonToDecimal() # have publisher notify subscribers self.publisher.trigger(self.publisher.EVENT_LOAN_UPDATE_BORROWER, {'loan':loan, 'amtPaid':amtPaid}) # done return result",
"strftime now = strftime('%Y-%m-%d', gmtime()) doc['recvdate'] = now break # update overpayment field",
"== principal J == effective rate N == total number of payments @param",
"Generates loan key @param biglittle.entity.loan.Loan loan @return string loanKey \"\"\" def generateLoanKey(self, loan)",
"12; monthly_payment = principal * ( effective_rate / (1 - (1 + effective_rate)",
"save(self, loan) : # generate loanKey loan = self.generateLoanKey(loan) # convert values to",
"Converts all BSON Decimal128 financial data fields to Decimal @param string borrowerKey @return",
": currency, 'monthlyPymt' : monthly_payment } loan = { 'borrowerKey' : borrowerKey, 'lenderKey'",
"/ (1 - (1 + J)**-N)) M == monthly payment P == principal",
"if success else False \"\"\" def save(self, loan) : # generate loanKey loan",
"import Decimal from bson.decimal128 import Decimal128 from utils.utils import Utils from biglittle.domain.base import",
"borrowerKey } result = self.collection.replace_one(filt,loan) # send out pub/sub notifications if result :",
"0.00, 'loanInfo' : loanInfo, 'payments' : [] } return loan \"\"\" Generates a",
"effective_rate) ** -numPayments )) # create LoanInfo and Loan documents loanInfo = {",
"\"\"\" def generateMany(self, principal, numPayments, currency, borrowerKey, lenders) : proposals = {} for",
"numPayments @param float annualRate @param string currency @param string borrowerKey @param string lenderKey",
"= {} for item in lenders : # pick an annual rate at",
"if loan : loanInfo = loan.getLoanInfo() amtDue = loanInfo.get('monthlyPymt').to_decimal() return amtDue \"\"\" Retrieves",
"@param string borrowerKey @param string lenderKey @param string lenderName @param string lenderBusiness @return",
"an annual rate at random import random annualRate = random.randint(1000,20000) / 1000 #",
"import Payment, LoanInfo, Loan class LoanService(Base) : # defaults collectName = 'biglittle.loans' \"\"\"",
"Saves loan document @param biglittle.entity.loan.Loan loan @return bool True if success else False",
"has been paid for doc in loan['payments'] : if doc['amountPaid'] == 0 :",
"lenderKey, lenderName, lenderBusiness) : # calc effective rate and monthly payment effective_rate =",
"return amtDue \"\"\" Retrieves loan for given borrower Converts all BSON Decimal128 financial",
"Looks for next scheduled payment for this borrower @param string borrowerKey @param float",
"def generateLoanKey(self, loan) : from time import gmtime, strftime date = strftime('%Y%m%d', gmtime())",
"decimal.Decimal for processing purposes if not isinstance(amtPaid, Decimal) : amtPaid = Decimal(amtPaid) #",
"loan = self.generateLoanKey(loan) # convert values to NumberDecimal loan.convertDecimalToBson() # save return self.collection.insert(loan)",
"convert values to NumberDecimal loan.convertDecimalToBson() # save return self.collection.insert(loan) \"\"\" Retrieves amount due",
"add to \"overpayment\" but do no further processing if amtPaid < amtDue :",
"loan \"\"\" Looks for next scheduled payment for this borrower @param string borrowerKey",
"item['key'] : doc }) return proposals \"\"\" Generates loan key @param biglittle.entity.loan.Loan loan",
"loan.convertDecimalToBson() # update by replacement filt = { 'borrowerKey' : borrowerKey } result",
"\"\"\" def generateLoanKey(self, loan) : from time import gmtime, strftime date = strftime('%Y%m%d',",
"/ 12; monthly_payment = principal * ( effective_rate / (1 - (1 +",
"financial data fields to Decimal @param string borrowerKey @return biglittle.entity.users.User instance \"\"\" def",
"# convert values to NumberDecimal loan.convertDecimalToBson() # save return self.collection.insert(loan) \"\"\" Retrieves amount",
"numPayments @param list lenders : list of lender keys @return dict \"\"\" def",
"principal @param int numPayments @param float annualRate @param string currency @param string borrowerKey",
"lenderBusiness) : # calc effective rate and monthly payment effective_rate = annualRate /",
"loan @return bool True if success else False \"\"\" def save(self, loan) :",
"borrowerKey @return biglittle.entity.users.User instance \"\"\" def fetchLoanByBorrowerKey(self, borrowerKey) : loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) loan.convertBsonToDecimal()",
"break # update overpayment field currentOver = loan.get('overpayment') loan.set('overpayment', currentOver + overpayment) #",
"} result = self.collection.replace_one(filt,loan) # send out pub/sub notifications if result : #",
"( J / (1 - (1 + J)**-N)) M == monthly payment P",
": [] } return loan \"\"\" Generates a series of simulated proposals @param",
"list of lender keys @return dict \"\"\" def generateMany(self, principal, numPayments, currency, borrowerKey,",
"\"\"\" Saves loan document @param biglittle.entity.loan.Loan loan @return bool True if success else",
"J / (1 - (1 + J)**-N)) M == monthly payment P ==",
"def fetchLoanByBorrowerKey(self, borrowerKey) : loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) loan.convertBsonToDecimal() return loan \"\"\" Looks for",
"@param int numPayments @param float annualRate @param string currency @param string borrowerKey @param",
": annualRate, 'effectiveRate' : effective_rate * 1000, 'currency' : currency, 'monthlyPymt' : monthly_payment",
"biglittle.entity.loan.Loan loan @return string loanKey \"\"\" def generateLoanKey(self, loan) : from time import",
"True if success else False \"\"\" def save(self, loan) : # generate loanKey",
"dict \"\"\" def generateProposal(self, principal, numPayments, annualRate, currency, borrowerKey, lenderKey, lenderName, lenderBusiness) :",
"1000 # add a new proposal doc = self.generateProposal(principal, numPayments, annualRate, currency, borrowerKey,",
"to Decimal loan.convertBsonToDecimal() # have publisher notify subscribers self.publisher.trigger(self.publisher.EVENT_LOAN_UPDATE_BORROWER, {'loan':loan, 'amtPaid':amtPaid}) # done",
"annualRate, currency, borrowerKey, lenderKey, lenderName, lenderBusiness) : # calc effective rate and monthly",
"item['business']) proposals.update({ item['key'] : doc }) return proposals \"\"\" Generates loan key @param",
"@param string borrowerKey @return Decimal amtDue \"\"\" def fetchAmtDueForBorrower(self, borrowerKey) : amtDue =",
"loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) if loan : loanInfo = loan.getLoanInfo() amtDue = loanInfo.get('monthlyPymt').to_decimal() return",
"# defaults collectName = 'biglittle.loans' \"\"\" Generates a proposal Formula: M = P",
"False otherwise \"\"\" def processPayment(self, borrowerKey, amtPaid, loan) : # init vars config",
"borrowerKey, item['key'], item['name'], item['business']) proposals.update({ item['key'] : doc }) return proposals \"\"\" Generates",
": loanInfo, 'payments' : [] } return loan \"\"\" Generates a series of",
"add a new proposal doc = self.generateProposal(principal, numPayments, annualRate, currency, borrowerKey, item['key'], item['name'],",
"@param list lenders : list of lender keys @return dict \"\"\" def generateMany(self,",
"= strftime('%Y%m%d', gmtime()) loan['loanKey'] = loan['borrowerKey'] + '_' + loan['lenderKey'] + '_' +",
"(1 + J)**-N)) M == monthly payment P == principal J == effective",
"Decimal(0.00) loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) if loan : loanInfo = loan.getLoanInfo() amtDue = loanInfo.get('monthlyPymt').to_decimal()",
"= { 'borrowerKey' : borrowerKey } result = self.collection.replace_one(filt,loan) # send out pub/sub",
"{ 'principal' : principal, 'numPayments' : numPayments, 'annualRate' : annualRate, 'effectiveRate' : effective_rate",
"def save(self, loan) : # generate loanKey loan = self.generateLoanKey(loan) # convert values",
"where nothing has been paid for doc in loan['payments'] : if doc['amountPaid'] ==",
"# update by replacement filt = { 'borrowerKey' : borrowerKey } result =",
"now break # update overpayment field currentOver = loan.get('overpayment') loan.set('overpayment', currentOver + overpayment)",
"proposals \"\"\" Generates loan key @param biglittle.entity.loan.Loan loan @return string loanKey \"\"\" def",
"notifications if result : # convert values to Decimal loan.convertBsonToDecimal() # have publisher",
"payment processed OK | False otherwise \"\"\" def processPayment(self, borrowerKey, amtPaid, loan) :",
"== monthly payment P == principal J == effective rate N == total",
"this borrower @param string borrowerKey @param float amtPaid @param biglittle.entity.loan.Loan loan @return bool",
": effective_rate * 1000, 'currency' : currency, 'monthlyPymt' : monthly_payment } loan =",
"amtDue : overpayment = amtPaid else : overpayment = amtPaid - amtDue #",
"module which interacts with the \"loans\" collection \"\"\" import pymongo import decimal from",
"* 1000, 'currency' : currency, 'monthlyPymt' : monthly_payment } loan = { 'borrowerKey'",
"monthly_payment } loan = { 'borrowerKey' : borrowerKey, 'lenderKey' : lenderKey, 'lenderName' :",
": principal, 'numPayments' : numPayments, 'annualRate' : annualRate, 'effectiveRate' : effective_rate * 1000,",
"Description: module which interacts with the \"loans\" collection \"\"\" import pymongo import decimal",
"+ loan['lenderKey'] + '_' + date return loan \"\"\" Saves loan document @param",
"if amtPaid < amtDue : overpayment = amtPaid else : overpayment = amtPaid",
"from biglittle.entity.loan import Payment, LoanInfo, Loan class LoanService(Base) : # defaults collectName =",
"generateMany(self, principal, numPayments, currency, borrowerKey, lenders) : proposals = {} for item in",
"loan) : # init vars config = Config() utils = Utils() result =",
"for processing purposes if not isinstance(amtPaid, Decimal) : amtPaid = Decimal(amtPaid) # find",
"@return Decimal amtDue \"\"\" def fetchAmtDueForBorrower(self, borrowerKey) : amtDue = Decimal(0.00) loan =",
"proposals.update({ item['key'] : doc }) return proposals \"\"\" Generates loan key @param biglittle.entity.loan.Loan",
"0.00 # convert amount paid to decimal.Decimal for processing purposes if not isinstance(amtPaid,",
"if payment processed OK | False otherwise \"\"\" def processPayment(self, borrowerKey, amtPaid, loan)",
"string borrowerKey @param float amtPaid @param biglittle.entity.loan.Loan loan @return bool True if payment",
"proposals = {} for item in lenders : # pick an annual rate",
"loanInfo.getMonthlyPayment() overpayment = 0.00 # convert amount paid to decimal.Decimal for processing purposes",
"\"\"\" def fetchLoanByBorrowerKey(self, borrowerKey) : loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) loan.convertBsonToDecimal() return loan \"\"\" Looks",
"bool True if payment processed OK | False otherwise \"\"\" def processPayment(self, borrowerKey,",
"borrowerKey @param string lenderKey @param string lenderName @param string lenderBusiness @return dict \"\"\"",
"due for given borrower @param string borrowerKey @return Decimal amtDue \"\"\" def fetchAmtDueForBorrower(self,",
"apply payment doc['amountPaid'] = doc['amountDue'] doc['status'] = 'received' # save var today from",
"= False loanInfo = loan.getLoanInfo() amtDue = loanInfo.getMonthlyPayment() overpayment = 0.00 # convert",
"'monthlyPymt' : monthly_payment } loan = { 'borrowerKey' : borrowerKey, 'lenderKey' : lenderKey,",
"string currency @param string borrowerKey @param string lenderKey @param string lenderName @param string",
"effective_rate = annualRate / 100 / 12; monthly_payment = principal * ( effective_rate",
"processing if amtPaid < amtDue : overpayment = amtPaid else : overpayment =",
"convert values to NumberDecimal loan.convertDecimalToBson() # update by replacement filt = { 'borrowerKey'",
"loan : loanInfo = loan.getLoanInfo() amtDue = loanInfo.get('monthlyPymt').to_decimal() return amtDue \"\"\" Retrieves loan",
"Decimal loan.convertBsonToDecimal() # have publisher notify subscribers self.publisher.trigger(self.publisher.EVENT_LOAN_UPDATE_BORROWER, {'loan':loan, 'amtPaid':amtPaid}) # done return",
"do no further processing if amtPaid < amtDue : overpayment = amtPaid else",
"payment effective_rate = annualRate / 100 / 12; monthly_payment = principal * (",
"/ 100 / 12; monthly_payment = principal * ( effective_rate / (1 -",
"amtPaid < amtDue : overpayment = amtPaid else : overpayment = amtPaid -",
"self.generateLoanKey(loan) # convert values to NumberDecimal loan.convertDecimalToBson() # save return self.collection.insert(loan) \"\"\" Retrieves",
"import pymongo import decimal from config.config import Config from pymongo.cursor import CursorType from",
"send out pub/sub notifications if result : # convert values to Decimal loan.convertBsonToDecimal()",
"@param float annualRate @param string currency @param string borrowerKey @param string lenderKey @param",
"return self.collection.insert(loan) \"\"\" Retrieves amount due for given borrower @param string borrowerKey @return",
"instance \"\"\" def fetchLoanByBorrowerKey(self, borrowerKey) : loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) loan.convertBsonToDecimal() return loan \"\"\"",
"loan) : # generate loanKey loan = self.generateLoanKey(loan) # convert values to NumberDecimal",
"principal, numPayments, annualRate, currency, borrowerKey, lenderKey, lenderName, lenderBusiness) : # calc effective rate",
": 0.00, 'loanInfo' : loanInfo, 'payments' : [] } return loan \"\"\" Generates",
"Decimal(amtPaid) # find first payment where nothing has been paid for doc in",
"Config() utils = Utils() result = False loanInfo = loan.getLoanInfo() amtDue = loanInfo.getMonthlyPayment()",
"* ( effective_rate / (1 - (1 + effective_rate) ** -numPayments )) #",
"\"\"\" import pymongo import decimal from config.config import Config from pymongo.cursor import CursorType",
"doc in loan['payments'] : if doc['amountPaid'] == 0 : # if underpayment, add",
"currency, borrowerKey, item['key'], item['name'], item['business']) proposals.update({ item['key'] : doc }) return proposals \"\"\"",
"amtDue = loanInfo.get('monthlyPymt').to_decimal() return amtDue \"\"\" Retrieves loan for given borrower Converts all",
"string lenderKey @param string lenderName @param string lenderBusiness @return dict \"\"\" def generateProposal(self,",
"float principal @param int numPayments @param list lenders : list of lender keys",
"@return string loanKey \"\"\" def generateLoanKey(self, loan) : from time import gmtime, strftime",
"@return dict \"\"\" def generateMany(self, principal, numPayments, currency, borrowerKey, lenders) : proposals =",
"values to NumberDecimal loan.convertDecimalToBson() # save return self.collection.insert(loan) \"\"\" Retrieves amount due for",
"\"\"\" def processPayment(self, borrowerKey, amtPaid, loan) : # init vars config = Config()",
"= loan.getLoanInfo() amtDue = loanInfo.getMonthlyPayment() overpayment = 0.00 # convert amount paid to",
"= self.generateProposal(principal, numPayments, annualRate, currency, borrowerKey, item['key'], item['name'], item['business']) proposals.update({ item['key'] : doc",
": loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) loan.convertBsonToDecimal() return loan \"\"\" Looks for next scheduled payment",
"# save var today from time import gmtime, strftime now = strftime('%Y-%m-%d', gmtime())",
"date = strftime('%Y%m%d', gmtime()) loan['loanKey'] = loan['borrowerKey'] + '_' + loan['lenderKey'] + '_'",
"create LoanInfo and Loan documents loanInfo = { 'principal' : principal, 'numPayments' :",
"Generates a series of simulated proposals @param float principal @param int numPayments @param",
"Config from pymongo.cursor import CursorType from decimal import Decimal from bson.decimal128 import Decimal128",
"True if payment processed OK | False otherwise \"\"\" def processPayment(self, borrowerKey, amtPaid,",
"numPayments, annualRate, currency, borrowerKey, lenderKey, lenderName, lenderBusiness) : # calc effective rate and",
"var today from time import gmtime, strftime now = strftime('%Y-%m-%d', gmtime()) doc['recvdate'] =",
"interacts with the \"loans\" collection \"\"\" import pymongo import decimal from config.config import",
"find first payment where nothing has been paid for doc in loan['payments'] :",
"effective_rate / (1 - (1 + effective_rate) ** -numPayments )) # create LoanInfo",
": list of lender keys @return dict \"\"\" def generateMany(self, principal, numPayments, currency,",
"Decimal from bson.decimal128 import Decimal128 from utils.utils import Utils from biglittle.domain.base import Base",
"'biglittle.loans' \"\"\" Generates a proposal Formula: M = P * ( J /",
"for this borrower @param string borrowerKey @param float amtPaid @param biglittle.entity.loan.Loan loan @return",
"doc['amountPaid'] = doc['amountDue'] doc['status'] = 'received' # save var today from time import",
"= loan.get('overpayment') loan.set('overpayment', currentOver + overpayment) # convert values to NumberDecimal loan.convertDecimalToBson() #",
"\"\"\" def fetchAmtDueForBorrower(self, borrowerKey) : amtDue = Decimal(0.00) loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) if loan",
"= P * ( J / (1 - (1 + J)**-N)) M ==",
"numPayments, annualRate, currency, borrowerKey, item['key'], item['name'], item['business']) proposals.update({ item['key'] : doc }) return",
"from config.config import Config from pymongo.cursor import CursorType from decimal import Decimal from",
"string loanKey \"\"\" def generateLoanKey(self, loan) : from time import gmtime, strftime date",
"import Base from biglittle.entity.loan import Payment, LoanInfo, Loan class LoanService(Base) : # defaults",
"P == principal J == effective rate N == total number of payments",
"item['key'], item['name'], item['business']) proposals.update({ item['key'] : doc }) return proposals \"\"\" Generates loan",
"underpayment, add to \"overpayment\" but do no further processing if amtPaid < amtDue",
"import random annualRate = random.randint(1000,20000) / 1000 # add a new proposal doc",
"= 'biglittle.loans' \"\"\" Generates a proposal Formula: M = P * ( J",
"proposal Formula: M = P * ( J / (1 - (1 +",
"borrowerKey, lenderKey, lenderName, lenderBusiness) : # calc effective rate and monthly payment effective_rate",
"monthly_payment = principal * ( effective_rate / (1 - (1 + effective_rate) **",
"next scheduled payment for this borrower @param string borrowerKey @param float amtPaid @param",
"values to Decimal loan.convertBsonToDecimal() # have publisher notify subscribers self.publisher.trigger(self.publisher.EVENT_LOAN_UPDATE_BORROWER, {'loan':loan, 'amtPaid':amtPaid}) #",
": # defaults collectName = 'biglittle.loans' \"\"\" Generates a proposal Formula: M =",
"principal, 'numPayments' : numPayments, 'annualRate' : annualRate, 'effectiveRate' : effective_rate * 1000, 'currency'",
"LoanService(Base) : # defaults collectName = 'biglittle.loans' \"\"\" Generates a proposal Formula: M",
"# calc effective rate and monthly payment effective_rate = annualRate / 100 /",
"loan document @param biglittle.entity.loan.Loan loan @return bool True if success else False \"\"\"",
"defaults collectName = 'biglittle.loans' \"\"\" Generates a proposal Formula: M = P *",
"strftime date = strftime('%Y%m%d', gmtime()) loan['loanKey'] = loan['borrowerKey'] + '_' + loan['lenderKey'] +",
"\"\"\" Looks for next scheduled payment for this borrower @param string borrowerKey @param",
"loanKey loan = self.generateLoanKey(loan) # convert values to NumberDecimal loan.convertDecimalToBson() # save return",
": # calc effective rate and monthly payment effective_rate = annualRate / 100",
"\"\"\" Generates a proposal Formula: M = P * ( J / (1",
"convert amount paid to decimal.Decimal for processing purposes if not isinstance(amtPaid, Decimal) :",
"- amtDue # apply payment doc['amountPaid'] = doc['amountDue'] doc['status'] = 'received' # save",
"processing purposes if not isinstance(amtPaid, Decimal) : amtPaid = Decimal(amtPaid) # find first",
"loan \"\"\" Saves loan document @param biglittle.entity.loan.Loan loan @return bool True if success",
"given borrower @param string borrowerKey @return Decimal amtDue \"\"\" def fetchAmtDueForBorrower(self, borrowerKey) :",
"return loan \"\"\" Looks for next scheduled payment for this borrower @param string",
"payment P == principal J == effective rate N == total number of",
"update overpayment field currentOver = loan.get('overpayment') loan.set('overpayment', currentOver + overpayment) # convert values",
"given borrower Converts all BSON Decimal128 financial data fields to Decimal @param string",
"series of simulated proposals @param float principal @param int numPayments @param list lenders",
": overpayment = amtPaid else : overpayment = amtPaid - amtDue # apply",
"random import random annualRate = random.randint(1000,20000) / 1000 # add a new proposal",
"from biglittle.domain.base import Base from biglittle.entity.loan import Payment, LoanInfo, Loan class LoanService(Base) :",
"1000, 'currency' : currency, 'monthlyPymt' : monthly_payment } loan = { 'borrowerKey' :",
"calc effective rate and monthly payment effective_rate = annualRate / 100 / 12;",
"loanKey \"\"\" def generateLoanKey(self, loan) : from time import gmtime, strftime date =",
"} loan = { 'borrowerKey' : borrowerKey, 'lenderKey' : lenderKey, 'lenderName' : lenderName,",
"P * ( J / (1 - (1 + J)**-N)) M == monthly",
"import Decimal128 from utils.utils import Utils from biglittle.domain.base import Base from biglittle.entity.loan import",
"gmtime, strftime now = strftime('%Y-%m-%d', gmtime()) doc['recvdate'] = now break # update overpayment",
"annualRate @param string currency @param string borrowerKey @param string lenderKey @param string lenderName",
"to NumberDecimal loan.convertDecimalToBson() # update by replacement filt = { 'borrowerKey' : borrowerKey",
"borrowerKey, 'lenderKey' : lenderKey, 'lenderName' : lenderName, 'lenderBusiness' : lenderBusiness, 'overpayment' : 0.00,",
"'loanInfo' : loanInfo, 'payments' : [] } return loan \"\"\" Generates a series",
"to Decimal @param string borrowerKey @return biglittle.entity.users.User instance \"\"\" def fetchLoanByBorrowerKey(self, borrowerKey) :",
"utils.utils import Utils from biglittle.domain.base import Base from biglittle.entity.loan import Payment, LoanInfo, Loan",
"float annualRate @param string currency @param string borrowerKey @param string lenderKey @param string",
"@param int numPayments @param list lenders : list of lender keys @return dict",
"for given borrower Converts all BSON Decimal128 financial data fields to Decimal @param",
"fields to Decimal @param string borrowerKey @return biglittle.entity.users.User instance \"\"\" def fetchLoanByBorrowerKey(self, borrowerKey)",
"< amtDue : overpayment = amtPaid else : overpayment = amtPaid - amtDue",
"biglittle.entity.loan import Payment, LoanInfo, Loan class LoanService(Base) : # defaults collectName = 'biglittle.loans'",
"def generateMany(self, principal, numPayments, currency, borrowerKey, lenders) : proposals = {} for item",
"been paid for doc in loan['payments'] : if doc['amountPaid'] == 0 : #",
"borrowerKey, lenders) : proposals = {} for item in lenders : # pick",
"pub/sub notifications if result : # convert values to Decimal loan.convertBsonToDecimal() # have",
"lenderName, 'lenderBusiness' : lenderBusiness, 'overpayment' : 0.00, 'loanInfo' : loanInfo, 'payments' : []",
"principal, numPayments, currency, borrowerKey, lenders) : proposals = {} for item in lenders",
"loan.convertDecimalToBson() # save return self.collection.insert(loan) \"\"\" Retrieves amount due for given borrower @param",
"LoanInfo, Loan class LoanService(Base) : # defaults collectName = 'biglittle.loans' \"\"\" Generates a",
"string lenderBusiness @return dict \"\"\" def generateProposal(self, principal, numPayments, annualRate, currency, borrowerKey, lenderKey,",
"all BSON Decimal128 financial data fields to Decimal @param string borrowerKey @return biglittle.entity.users.User",
"list lenders : list of lender keys @return dict \"\"\" def generateMany(self, principal,",
"currentOver + overpayment) # convert values to NumberDecimal loan.convertDecimalToBson() # update by replacement",
"'_' + date return loan \"\"\" Saves loan document @param biglittle.entity.loan.Loan loan @return",
"overpayment) # convert values to NumberDecimal loan.convertDecimalToBson() # update by replacement filt =",
"Loan class LoanService(Base) : # defaults collectName = 'biglittle.loans' \"\"\" Generates a proposal",
"False \"\"\" def save(self, loan) : # generate loanKey loan = self.generateLoanKey(loan) #",
"doc['recvdate'] = now break # update overpayment field currentOver = loan.get('overpayment') loan.set('overpayment', currentOver",
"of payments @param float principal @param int numPayments @param float annualRate @param string",
"vars config = Config() utils = Utils() result = False loanInfo = loan.getLoanInfo()",
"lenders : # pick an annual rate at random import random annualRate =",
"fetchAmtDueForBorrower(self, borrowerKey) : amtDue = Decimal(0.00) loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) if loan : loanInfo",
"= amtPaid else : overpayment = amtPaid - amtDue # apply payment doc['amountPaid']",
"a proposal Formula: M = P * ( J / (1 - (1",
"@return biglittle.entity.users.User instance \"\"\" def fetchLoanByBorrowerKey(self, borrowerKey) : loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) loan.convertBsonToDecimal() return",
"init vars config = Config() utils = Utils() result = False loanInfo =",
"rate N == total number of payments @param float principal @param int numPayments",
"amount due for given borrower @param string borrowerKey @return Decimal amtDue \"\"\" def",
": monthly_payment } loan = { 'borrowerKey' : borrowerKey, 'lenderKey' : lenderKey, 'lenderName'",
"generate loanKey loan = self.generateLoanKey(loan) # convert values to NumberDecimal loan.convertDecimalToBson() # save",
"import CursorType from decimal import Decimal from bson.decimal128 import Decimal128 from utils.utils import",
"random annualRate = random.randint(1000,20000) / 1000 # add a new proposal doc =",
"to \"overpayment\" but do no further processing if amtPaid < amtDue : overpayment",
"decimal import Decimal from bson.decimal128 import Decimal128 from utils.utils import Utils from biglittle.domain.base",
"document @param biglittle.entity.loan.Loan loan @return bool True if success else False \"\"\" def",
"a new proposal doc = self.generateProposal(principal, numPayments, annualRate, currency, borrowerKey, item['key'], item['name'], item['business'])",
"save var today from time import gmtime, strftime now = strftime('%Y-%m-%d', gmtime()) doc['recvdate']",
"= now break # update overpayment field currentOver = loan.get('overpayment') loan.set('overpayment', currentOver +",
": lenderName, 'lenderBusiness' : lenderBusiness, 'overpayment' : 0.00, 'loanInfo' : loanInfo, 'payments' :",
"if result : # convert values to Decimal loan.convertBsonToDecimal() # have publisher notify",
"Base from biglittle.entity.loan import Payment, LoanInfo, Loan class LoanService(Base) : # defaults collectName",
"# send out pub/sub notifications if result : # convert values to Decimal",
"= { 'principal' : principal, 'numPayments' : numPayments, 'annualRate' : annualRate, 'effectiveRate' :",
"(1 - (1 + effective_rate) ** -numPayments )) # create LoanInfo and Loan",
"@param float amtPaid @param biglittle.entity.loan.Loan loan @return bool True if payment processed OK",
"from time import gmtime, strftime now = strftime('%Y-%m-%d', gmtime()) doc['recvdate'] = now break",
"= { 'borrowerKey' : borrowerKey, 'lenderKey' : lenderKey, 'lenderName' : lenderName, 'lenderBusiness' :",
"string lenderName @param string lenderBusiness @return dict \"\"\" def generateProposal(self, principal, numPayments, annualRate,",
"collectName = 'biglittle.loans' \"\"\" Generates a proposal Formula: M = P * (",
"borrowerKey @param float amtPaid @param biglittle.entity.loan.Loan loan @return bool True if payment processed",
"out pub/sub notifications if result : # convert values to Decimal loan.convertBsonToDecimal() #",
"= self.collection.find_one({\"borrowerKey\":borrowerKey}) loan.convertBsonToDecimal() return loan \"\"\" Looks for next scheduled payment for this",
"loanInfo = loan.getLoanInfo() amtDue = loanInfo.getMonthlyPayment() overpayment = 0.00 # convert amount paid",
"overpayment = amtPaid else : overpayment = amtPaid - amtDue # apply payment",
"= self.generateLoanKey(loan) # convert values to NumberDecimal loan.convertDecimalToBson() # save return self.collection.insert(loan) \"\"\"",
": lenderKey, 'lenderName' : lenderName, 'lenderBusiness' : lenderBusiness, 'overpayment' : 0.00, 'loanInfo' :",
": overpayment = amtPaid - amtDue # apply payment doc['amountPaid'] = doc['amountDue'] doc['status']",
"# update overpayment field currentOver = loan.get('overpayment') loan.set('overpayment', currentOver + overpayment) # convert",
"loan = { 'borrowerKey' : borrowerKey, 'lenderKey' : lenderKey, 'lenderName' : lenderName, 'lenderBusiness'",
"amtPaid - amtDue # apply payment doc['amountPaid'] = doc['amountDue'] doc['status'] = 'received' #",
"from bson.decimal128 import Decimal128 from utils.utils import Utils from biglittle.domain.base import Base from",
": lenderBusiness, 'overpayment' : 0.00, 'loanInfo' : loanInfo, 'payments' : [] } return",
"self.generateProposal(principal, numPayments, annualRate, currency, borrowerKey, item['key'], item['name'], item['business']) proposals.update({ item['key'] : doc })",
"'payments' : [] } return loan \"\"\" Generates a series of simulated proposals",
"effective rate N == total number of payments @param float principal @param int",
"lenderBusiness, 'overpayment' : 0.00, 'loanInfo' : loanInfo, 'payments' : [] } return loan",
"by replacement filt = { 'borrowerKey' : borrowerKey } result = self.collection.replace_one(filt,loan) #",
"loan.getLoanInfo() amtDue = loanInfo.get('monthlyPymt').to_decimal() return amtDue \"\"\" Retrieves loan for given borrower Converts",
"borrower Converts all BSON Decimal128 financial data fields to Decimal @param string borrowerKey",
": amtPaid = Decimal(amtPaid) # find first payment where nothing has been paid",
"doc['amountPaid'] == 0 : # if underpayment, add to \"overpayment\" but do no",
"\"loans\" collection \"\"\" import pymongo import decimal from config.config import Config from pymongo.cursor",
"'annualRate' : annualRate, 'effectiveRate' : effective_rate * 1000, 'currency' : currency, 'monthlyPymt' :",
"biglittle.entity.users.User instance \"\"\" def fetchLoanByBorrowerKey(self, borrowerKey) : loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) loan.convertBsonToDecimal() return loan",
"monthly payment effective_rate = annualRate / 100 / 12; monthly_payment = principal *",
"(1 - (1 + J)**-N)) M == monthly payment P == principal J",
"- (1 + effective_rate) ** -numPayments )) # create LoanInfo and Loan documents",
"item['name'], item['business']) proposals.update({ item['key'] : doc }) return proposals \"\"\" Generates loan key",
"from pymongo.cursor import CursorType from decimal import Decimal from bson.decimal128 import Decimal128 from",
"@param string lenderKey @param string lenderName @param string lenderBusiness @return dict \"\"\" def",
"numPayments, currency, borrowerKey, lenders) : proposals = {} for item in lenders :",
"replacement filt = { 'borrowerKey' : borrowerKey } result = self.collection.replace_one(filt,loan) # send",
"Loan documents loanInfo = { 'principal' : principal, 'numPayments' : numPayments, 'annualRate' :",
"pymongo.cursor import CursorType from decimal import Decimal from bson.decimal128 import Decimal128 from utils.utils",
"borrowerKey) : loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) loan.convertBsonToDecimal() return loan \"\"\" Looks for next scheduled",
"time import gmtime, strftime date = strftime('%Y%m%d', gmtime()) loan['loanKey'] = loan['borrowerKey'] + '_'",
": if doc['amountPaid'] == 0 : # if underpayment, add to \"overpayment\" but",
"doc }) return proposals \"\"\" Generates loan key @param biglittle.entity.loan.Loan loan @return string",
"time import gmtime, strftime now = strftime('%Y-%m-%d', gmtime()) doc['recvdate'] = now break #",
"+ overpayment) # convert values to NumberDecimal loan.convertDecimalToBson() # update by replacement filt",
": doc }) return proposals \"\"\" Generates loan key @param biglittle.entity.loan.Loan loan @return",
"\"\"\" Retrieves loan for given borrower Converts all BSON Decimal128 financial data fields",
"first payment where nothing has been paid for doc in loan['payments'] : if",
"filt = { 'borrowerKey' : borrowerKey } result = self.collection.replace_one(filt,loan) # send out",
"if underpayment, add to \"overpayment\" but do no further processing if amtPaid <",
"payment where nothing has been paid for doc in loan['payments'] : if doc['amountPaid']",
"amtDue = Decimal(0.00) loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) if loan : loanInfo = loan.getLoanInfo() amtDue",
"purposes if not isinstance(amtPaid, Decimal) : amtPaid = Decimal(amtPaid) # find first payment",
"@param biglittle.entity.loan.Loan loan @return string loanKey \"\"\" def generateLoanKey(self, loan) : from time",
"{} for item in lenders : # pick an annual rate at random",
"@param biglittle.entity.loan.Loan loan @return bool True if payment processed OK | False otherwise",
"lenderKey, 'lenderName' : lenderName, 'lenderBusiness' : lenderBusiness, 'overpayment' : 0.00, 'loanInfo' : loanInfo,",
"amtPaid @param biglittle.entity.loan.Loan loan @return bool True if payment processed OK | False",
"processed OK | False otherwise \"\"\" def processPayment(self, borrowerKey, amtPaid, loan) : #",
"import Config from pymongo.cursor import CursorType from decimal import Decimal from bson.decimal128 import",
": proposals = {} for item in lenders : # pick an annual",
"date return loan \"\"\" Saves loan document @param biglittle.entity.loan.Loan loan @return bool True",
"borrower @param string borrowerKey @return Decimal amtDue \"\"\" def fetchAmtDueForBorrower(self, borrowerKey) : amtDue",
"paid for doc in loan['payments'] : if doc['amountPaid'] == 0 : # if",
"overpayment = 0.00 # convert amount paid to decimal.Decimal for processing purposes if",
"# convert values to NumberDecimal loan.convertDecimalToBson() # update by replacement filt = {",
"loan) : from time import gmtime, strftime date = strftime('%Y%m%d', gmtime()) loan['loanKey'] =",
"payment for this borrower @param string borrowerKey @param float amtPaid @param biglittle.entity.loan.Loan loan",
"'lenderKey' : lenderKey, 'lenderName' : lenderName, 'lenderBusiness' : lenderBusiness, 'overpayment' : 0.00, 'loanInfo'",
"lenderName, lenderBusiness) : # calc effective rate and monthly payment effective_rate = annualRate",
"fetchLoanByBorrowerKey(self, borrowerKey) : loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) loan.convertBsonToDecimal() return loan \"\"\" Looks for next",
"at random import random annualRate = random.randint(1000,20000) / 1000 # add a new",
"save return self.collection.insert(loan) \"\"\" Retrieves amount due for given borrower @param string borrowerKey",
"== 0 : # if underpayment, add to \"overpayment\" but do no further",
"annualRate, 'effectiveRate' : effective_rate * 1000, 'currency' : currency, 'monthlyPymt' : monthly_payment }",
"no further processing if amtPaid < amtDue : overpayment = amtPaid else :",
"J == effective rate N == total number of payments @param float principal",
"# pick an annual rate at random import random annualRate = random.randint(1000,20000) /",
"the \"loans\" collection \"\"\" import pymongo import decimal from config.config import Config from",
"string borrowerKey @return Decimal amtDue \"\"\" def fetchAmtDueForBorrower(self, borrowerKey) : amtDue = Decimal(0.00)",
"def processPayment(self, borrowerKey, amtPaid, loan) : # init vars config = Config() utils",
"+ '_' + loan['lenderKey'] + '_' + date return loan \"\"\" Saves loan",
"payment doc['amountPaid'] = doc['amountDue'] doc['status'] = 'received' # save var today from time",
"strftime('%Y%m%d', gmtime()) loan['loanKey'] = loan['borrowerKey'] + '_' + loan['lenderKey'] + '_' + date",
"but do no further processing if amtPaid < amtDue : overpayment = amtPaid",
"to decimal.Decimal for processing purposes if not isinstance(amtPaid, Decimal) : amtPaid = Decimal(amtPaid)",
"effective_rate * 1000, 'currency' : currency, 'monthlyPymt' : monthly_payment } loan = {",
"NumberDecimal loan.convertDecimalToBson() # save return self.collection.insert(loan) \"\"\" Retrieves amount due for given borrower",
"= loanInfo.getMonthlyPayment() overpayment = 0.00 # convert amount paid to decimal.Decimal for processing",
"Decimal128 financial data fields to Decimal @param string borrowerKey @return biglittle.entity.users.User instance \"\"\"",
"loan['borrowerKey'] + '_' + loan['lenderKey'] + '_' + date return loan \"\"\" Saves",
"return loan \"\"\" Saves loan document @param biglittle.entity.loan.Loan loan @return bool True if",
"int numPayments @param float annualRate @param string currency @param string borrowerKey @param string",
"lenderBusiness @return dict \"\"\" def generateProposal(self, principal, numPayments, annualRate, currency, borrowerKey, lenderKey, lenderName,",
"100 / 12; monthly_payment = principal * ( effective_rate / (1 - (1",
"and Loan documents loanInfo = { 'principal' : principal, 'numPayments' : numPayments, 'annualRate'",
"+ '_' + date return loan \"\"\" Saves loan document @param biglittle.entity.loan.Loan loan",
"pick an annual rate at random import random annualRate = random.randint(1000,20000) / 1000",
"Utils() result = False loanInfo = loan.getLoanInfo() amtDue = loanInfo.getMonthlyPayment() overpayment = 0.00",
": numPayments, 'annualRate' : annualRate, 'effectiveRate' : effective_rate * 1000, 'currency' : currency,",
"= loanInfo.get('monthlyPymt').to_decimal() return amtDue \"\"\" Retrieves loan for given borrower Converts all BSON",
"J)**-N)) M == monthly payment P == principal J == effective rate N",
"import Utils from biglittle.domain.base import Base from biglittle.entity.loan import Payment, LoanInfo, Loan class",
": from time import gmtime, strftime date = strftime('%Y%m%d', gmtime()) loan['loanKey'] = loan['borrowerKey']",
"float principal @param int numPayments @param float annualRate @param string currency @param string",
"loan \"\"\" Generates a series of simulated proposals @param float principal @param int",
"# if underpayment, add to \"overpayment\" but do no further processing if amtPaid",
"# save return self.collection.insert(loan) \"\"\" Retrieves amount due for given borrower @param string",
"which interacts with the \"loans\" collection \"\"\" import pymongo import decimal from config.config",
"borrower @param string borrowerKey @param float amtPaid @param biglittle.entity.loan.Loan loan @return bool True",
"from time import gmtime, strftime date = strftime('%Y%m%d', gmtime()) loan['loanKey'] = loan['borrowerKey'] +",
"Decimal) : amtPaid = Decimal(amtPaid) # find first payment where nothing has been",
"result = False loanInfo = loan.getLoanInfo() amtDue = loanInfo.getMonthlyPayment() overpayment = 0.00 #",
"= Decimal(0.00) loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) if loan : loanInfo = loan.getLoanInfo() amtDue =",
"0 : # if underpayment, add to \"overpayment\" but do no further processing",
"collection \"\"\" import pymongo import decimal from config.config import Config from pymongo.cursor import",
"annualRate = random.randint(1000,20000) / 1000 # add a new proposal doc = self.generateProposal(principal,",
"= Utils() result = False loanInfo = loan.getLoanInfo() amtDue = loanInfo.getMonthlyPayment() overpayment =",
"import gmtime, strftime date = strftime('%Y%m%d', gmtime()) loan['loanKey'] = loan['borrowerKey'] + '_' +",
"= self.collection.replace_one(filt,loan) # send out pub/sub notifications if result : # convert values",
"N == total number of payments @param float principal @param int numPayments @param",
"= amtPaid - amtDue # apply payment doc['amountPaid'] = doc['amountDue'] doc['status'] = 'received'",
"float amtPaid @param biglittle.entity.loan.Loan loan @return bool True if payment processed OK |",
"= 'received' # save var today from time import gmtime, strftime now =",
"OK | False otherwise \"\"\" def processPayment(self, borrowerKey, amtPaid, loan) : # init",
"string borrowerKey @return biglittle.entity.users.User instance \"\"\" def fetchLoanByBorrowerKey(self, borrowerKey) : loan = self.collection.find_one({\"borrowerKey\":borrowerKey})",
"* ( J / (1 - (1 + J)**-N)) M == monthly payment",
"biglittle.entity.loan.Loan loan @return bool True if success else False \"\"\" def save(self, loan)",
"config = Config() utils = Utils() result = False loanInfo = loan.getLoanInfo() amtDue",
"generateLoanKey(self, loan) : from time import gmtime, strftime date = strftime('%Y%m%d', gmtime()) loan['loanKey']",
"isinstance(amtPaid, Decimal) : amtPaid = Decimal(amtPaid) # find first payment where nothing has",
"\"\"\" biglittle.domain.loan Description: module which interacts with the \"loans\" collection \"\"\" import pymongo",
"lenders : list of lender keys @return dict \"\"\" def generateMany(self, principal, numPayments,",
"annual rate at random import random annualRate = random.randint(1000,20000) / 1000 # add",
"@param float principal @param int numPayments @param float annualRate @param string currency @param",
"annualRate / 100 / 12; monthly_payment = principal * ( effective_rate / (1",
"item in lenders : # pick an annual rate at random import random",
"class LoanService(Base) : # defaults collectName = 'biglittle.loans' \"\"\" Generates a proposal Formula:",
"keys @return dict \"\"\" def generateMany(self, principal, numPayments, currency, borrowerKey, lenders) : proposals",
": # generate loanKey loan = self.generateLoanKey(loan) # convert values to NumberDecimal loan.convertDecimalToBson()",
"proposals @param float principal @param int numPayments @param list lenders : list of",
"loan.convertBsonToDecimal() return loan \"\"\" Looks for next scheduled payment for this borrower @param",
"scheduled payment for this borrower @param string borrowerKey @param float amtPaid @param biglittle.entity.loan.Loan",
"= random.randint(1000,20000) / 1000 # add a new proposal doc = self.generateProposal(principal, numPayments,",
"amount paid to decimal.Decimal for processing purposes if not isinstance(amtPaid, Decimal) : amtPaid",
"self.collection.find_one({\"borrowerKey\":borrowerKey}) if loan : loanInfo = loan.getLoanInfo() amtDue = loanInfo.get('monthlyPymt').to_decimal() return amtDue \"\"\"",
"# add a new proposal doc = self.generateProposal(principal, numPayments, annualRate, currency, borrowerKey, item['key'],",
"loan for given borrower Converts all BSON Decimal128 financial data fields to Decimal",
"numPayments, 'annualRate' : annualRate, 'effectiveRate' : effective_rate * 1000, 'currency' : currency, 'monthlyPymt'",
"M = P * ( J / (1 - (1 + J)**-N)) M",
"from decimal import Decimal from bson.decimal128 import Decimal128 from utils.utils import Utils from",
"lender keys @return dict \"\"\" def generateMany(self, principal, numPayments, currency, borrowerKey, lenders) :",
"@param biglittle.entity.loan.Loan loan @return bool True if success else False \"\"\" def save(self,",
"Generates a proposal Formula: M = P * ( J / (1 -",
"{ 'borrowerKey' : borrowerKey } result = self.collection.replace_one(filt,loan) # send out pub/sub notifications",
"M == monthly payment P == principal J == effective rate N ==",
"proposal doc = self.generateProposal(principal, numPayments, annualRate, currency, borrowerKey, item['key'], item['name'], item['business']) proposals.update({ item['key']",
"amtPaid = Decimal(amtPaid) # find first payment where nothing has been paid for",
": loanInfo = loan.getLoanInfo() amtDue = loanInfo.get('monthlyPymt').to_decimal() return amtDue \"\"\" Retrieves loan for",
"@param string currency @param string borrowerKey @param string lenderKey @param string lenderName @param",
"principal J == effective rate N == total number of payments @param float",
"Payment, LoanInfo, Loan class LoanService(Base) : # defaults collectName = 'biglittle.loans' \"\"\" Generates",
"'numPayments' : numPayments, 'annualRate' : annualRate, 'effectiveRate' : effective_rate * 1000, 'currency' :",
"amtDue = loanInfo.getMonthlyPayment() overpayment = 0.00 # convert amount paid to decimal.Decimal for",
"biglittle.domain.loan Description: module which interacts with the \"loans\" collection \"\"\" import pymongo import",
"== effective rate N == total number of payments @param float principal @param",
"\"\"\" def generateProposal(self, principal, numPayments, annualRate, currency, borrowerKey, lenderKey, lenderName, lenderBusiness) : #",
"# apply payment doc['amountPaid'] = doc['amountDue'] doc['status'] = 'received' # save var today",
"= Config() utils = Utils() result = False loanInfo = loan.getLoanInfo() amtDue =",
"Decimal @param string borrowerKey @return biglittle.entity.users.User instance \"\"\" def fetchLoanByBorrowerKey(self, borrowerKey) : loan",
"LoanInfo and Loan documents loanInfo = { 'principal' : principal, 'numPayments' : numPayments,",
"success else False \"\"\" def save(self, loan) : # generate loanKey loan =",
"self.collection.replace_one(filt,loan) # send out pub/sub notifications if result : # convert values to",
"for item in lenders : # pick an annual rate at random import",
"further processing if amtPaid < amtDue : overpayment = amtPaid else : overpayment",
"gmtime, strftime date = strftime('%Y%m%d', gmtime()) loan['loanKey'] = loan['borrowerKey'] + '_' + loan['lenderKey']",
"@return dict \"\"\" def generateProposal(self, principal, numPayments, annualRate, currency, borrowerKey, lenderKey, lenderName, lenderBusiness)",
"number of payments @param float principal @param int numPayments @param float annualRate @param",
"of lender keys @return dict \"\"\" def generateMany(self, principal, numPayments, currency, borrowerKey, lenders)",
"NumberDecimal loan.convertDecimalToBson() # update by replacement filt = { 'borrowerKey' : borrowerKey }",
"loan.set('overpayment', currentOver + overpayment) # convert values to NumberDecimal loan.convertDecimalToBson() # update by",
"if not isinstance(amtPaid, Decimal) : amtPaid = Decimal(amtPaid) # find first payment where",
"def generateProposal(self, principal, numPayments, annualRate, currency, borrowerKey, lenderKey, lenderName, lenderBusiness) : # calc",
"gmtime()) doc['recvdate'] = now break # update overpayment field currentOver = loan.get('overpayment') loan.set('overpayment',",
"principal * ( effective_rate / (1 - (1 + effective_rate) ** -numPayments ))",
"loanInfo, 'payments' : [] } return loan \"\"\" Generates a series of simulated",
"= loan.getLoanInfo() amtDue = loanInfo.get('monthlyPymt').to_decimal() return amtDue \"\"\" Retrieves loan for given borrower",
"} return loan \"\"\" Generates a series of simulated proposals @param float principal",
"result : # convert values to Decimal loan.convertBsonToDecimal() # have publisher notify subscribers",
"+ date return loan \"\"\" Saves loan document @param biglittle.entity.loan.Loan loan @return bool",
"amtDue # apply payment doc['amountPaid'] = doc['amountDue'] doc['status'] = 'received' # save var",
"else : overpayment = amtPaid - amtDue # apply payment doc['amountPaid'] = doc['amountDue']",
"generateProposal(self, principal, numPayments, annualRate, currency, borrowerKey, lenderKey, lenderName, lenderBusiness) : # calc effective",
"total number of payments @param float principal @param int numPayments @param float annualRate",
"import decimal from config.config import Config from pymongo.cursor import CursorType from decimal import",
"+ effective_rate) ** -numPayments )) # create LoanInfo and Loan documents loanInfo =",
"annualRate, currency, borrowerKey, item['key'], item['name'], item['business']) proposals.update({ item['key'] : doc }) return proposals",
"new proposal doc = self.generateProposal(principal, numPayments, annualRate, currency, borrowerKey, item['key'], item['name'], item['business']) proposals.update({",
"@param string borrowerKey @param float amtPaid @param biglittle.entity.loan.Loan loan @return bool True if",
"| False otherwise \"\"\" def processPayment(self, borrowerKey, amtPaid, loan) : # init vars",
"self.collection.insert(loan) \"\"\" Retrieves amount due for given borrower @param string borrowerKey @return Decimal",
"Formula: M = P * ( J / (1 - (1 + J)**-N))",
"a series of simulated proposals @param float principal @param int numPayments @param list",
"'overpayment' : 0.00, 'loanInfo' : loanInfo, 'payments' : [] } return loan \"\"\"",
"else False \"\"\" def save(self, loan) : # generate loanKey loan = self.generateLoanKey(loan)",
"# create LoanInfo and Loan documents loanInfo = { 'principal' : principal, 'numPayments'",
"Decimal128 from utils.utils import Utils from biglittle.domain.base import Base from biglittle.entity.loan import Payment,",
"<reponame>PacktPublishing/-Learn-MongoDB-4.0 \"\"\" biglittle.domain.loan Description: module which interacts with the \"loans\" collection \"\"\" import",
"paid to decimal.Decimal for processing purposes if not isinstance(amtPaid, Decimal) : amtPaid =",
"not isinstance(amtPaid, Decimal) : amtPaid = Decimal(amtPaid) # find first payment where nothing",
"Retrieves amount due for given borrower @param string borrowerKey @return Decimal amtDue \"\"\"",
"dict \"\"\" def generateMany(self, principal, numPayments, currency, borrowerKey, lenders) : proposals = {}",
"\"\"\" Generates loan key @param biglittle.entity.loan.Loan loan @return string loanKey \"\"\" def generateLoanKey(self,",
"biglittle.domain.base import Base from biglittle.entity.loan import Payment, LoanInfo, Loan class LoanService(Base) : #",
"@param string borrowerKey @return biglittle.entity.users.User instance \"\"\" def fetchLoanByBorrowerKey(self, borrowerKey) : loan =",
"loanInfo = loan.getLoanInfo() amtDue = loanInfo.get('monthlyPymt').to_decimal() return amtDue \"\"\" Retrieves loan for given",
"return proposals \"\"\" Generates loan key @param biglittle.entity.loan.Loan loan @return string loanKey \"\"\"",
"currency, borrowerKey, lenders) : proposals = {} for item in lenders : #",
"amtDue \"\"\" def fetchAmtDueForBorrower(self, borrowerKey) : amtDue = Decimal(0.00) loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) if",
"for doc in loan['payments'] : if doc['amountPaid'] == 0 : # if underpayment,",
"values to NumberDecimal loan.convertDecimalToBson() # update by replacement filt = { 'borrowerKey' :",
"( effective_rate / (1 - (1 + effective_rate) ** -numPayments )) # create",
"lenderName @param string lenderBusiness @return dict \"\"\" def generateProposal(self, principal, numPayments, annualRate, currency,",
"@return bool True if success else False \"\"\" def save(self, loan) : #",
"principal @param int numPayments @param list lenders : list of lender keys @return",
"currency @param string borrowerKey @param string lenderKey @param string lenderName @param string lenderBusiness",
"[] } return loan \"\"\" Generates a series of simulated proposals @param float",
"utils = Utils() result = False loanInfo = loan.getLoanInfo() amtDue = loanInfo.getMonthlyPayment() overpayment",
"loan @return bool True if payment processed OK | False otherwise \"\"\" def",
"borrowerKey, amtPaid, loan) : # init vars config = Config() utils = Utils()",
"in loan['payments'] : if doc['amountPaid'] == 0 : # if underpayment, add to",
"self.collection.find_one({\"borrowerKey\":borrowerKey}) loan.convertBsonToDecimal() return loan \"\"\" Looks for next scheduled payment for this borrower",
"def fetchAmtDueForBorrower(self, borrowerKey) : amtDue = Decimal(0.00) loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) if loan :",
"currentOver = loan.get('overpayment') loan.set('overpayment', currentOver + overpayment) # convert values to NumberDecimal loan.convertDecimalToBson()",
": # convert values to Decimal loan.convertBsonToDecimal() # have publisher notify subscribers self.publisher.trigger(self.publisher.EVENT_LOAN_UPDATE_BORROWER,",
"rate at random import random annualRate = random.randint(1000,20000) / 1000 # add a",
"field currentOver = loan.get('overpayment') loan.set('overpayment', currentOver + overpayment) # convert values to NumberDecimal",
"borrowerKey) : amtDue = Decimal(0.00) loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) if loan : loanInfo =",
"loanInfo.get('monthlyPymt').to_decimal() return amtDue \"\"\" Retrieves loan for given borrower Converts all BSON Decimal128",
"loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) loan.convertBsonToDecimal() return loan \"\"\" Looks for next scheduled payment for",
"'lenderName' : lenderName, 'lenderBusiness' : lenderBusiness, 'overpayment' : 0.00, 'loanInfo' : loanInfo, 'payments'",
"doc = self.generateProposal(principal, numPayments, annualRate, currency, borrowerKey, item['key'], item['name'], item['business']) proposals.update({ item['key'] :",
"doc['status'] = 'received' # save var today from time import gmtime, strftime now",
"'principal' : principal, 'numPayments' : numPayments, 'annualRate' : annualRate, 'effectiveRate' : effective_rate *",
"loan key @param biglittle.entity.loan.Loan loan @return string loanKey \"\"\" def generateLoanKey(self, loan) :",
"with the \"loans\" collection \"\"\" import pymongo import decimal from config.config import Config",
"gmtime()) loan['loanKey'] = loan['borrowerKey'] + '_' + loan['lenderKey'] + '_' + date return",
"otherwise \"\"\" def processPayment(self, borrowerKey, amtPaid, loan) : # init vars config =",
"lenders) : proposals = {} for item in lenders : # pick an",
"strftime('%Y-%m-%d', gmtime()) doc['recvdate'] = now break # update overpayment field currentOver = loan.get('overpayment')",
"+ J)**-N)) M == monthly payment P == principal J == effective rate",
"\"\"\" Retrieves amount due for given borrower @param string borrowerKey @return Decimal amtDue",
"'currency' : currency, 'monthlyPymt' : monthly_payment } loan = { 'borrowerKey' : borrowerKey,",
"(1 + effective_rate) ** -numPayments )) # create LoanInfo and Loan documents loanInfo",
"@param string lenderName @param string lenderBusiness @return dict \"\"\" def generateProposal(self, principal, numPayments,",
"loan.get('overpayment') loan.set('overpayment', currentOver + overpayment) # convert values to NumberDecimal loan.convertDecimalToBson() # update",
"loan['lenderKey'] + '_' + date return loan \"\"\" Saves loan document @param biglittle.entity.loan.Loan",
"decimal from config.config import Config from pymongo.cursor import CursorType from decimal import Decimal",
"overpayment field currentOver = loan.get('overpayment') loan.set('overpayment', currentOver + overpayment) # convert values to",
": # if underpayment, add to \"overpayment\" but do no further processing if",
": # init vars config = Config() utils = Utils() result = False",
"to NumberDecimal loan.convertDecimalToBson() # save return self.collection.insert(loan) \"\"\" Retrieves amount due for given",
": amtDue = Decimal(0.00) loan = self.collection.find_one({\"borrowerKey\":borrowerKey}) if loan : loanInfo = loan.getLoanInfo()",
"in lenders : # pick an annual rate at random import random annualRate",
": borrowerKey, 'lenderKey' : lenderKey, 'lenderName' : lenderName, 'lenderBusiness' : lenderBusiness, 'overpayment' :",
"random.randint(1000,20000) / 1000 # add a new proposal doc = self.generateProposal(principal, numPayments, annualRate,",
"for given borrower @param string borrowerKey @return Decimal amtDue \"\"\" def fetchAmtDueForBorrower(self, borrowerKey)",
"'_' + loan['lenderKey'] + '_' + date return loan \"\"\" Saves loan document",
"currency, borrowerKey, lenderKey, lenderName, lenderBusiness) : # calc effective rate and monthly payment",
"bool True if success else False \"\"\" def save(self, loan) : # generate",
"{ 'borrowerKey' : borrowerKey, 'lenderKey' : lenderKey, 'lenderName' : lenderName, 'lenderBusiness' : lenderBusiness,",
"for next scheduled payment for this borrower @param string borrowerKey @param float amtPaid",
"'borrowerKey' : borrowerKey, 'lenderKey' : lenderKey, 'lenderName' : lenderName, 'lenderBusiness' : lenderBusiness, 'overpayment'",
"simulated proposals @param float principal @param int numPayments @param list lenders : list",
"convert values to Decimal loan.convertBsonToDecimal() # have publisher notify subscribers self.publisher.trigger(self.publisher.EVENT_LOAN_UPDATE_BORROWER, {'loan':loan, 'amtPaid':amtPaid})",
"from utils.utils import Utils from biglittle.domain.base import Base from biglittle.entity.loan import Payment, LoanInfo,",
"\"overpayment\" but do no further processing if amtPaid < amtDue : overpayment =",
"amtPaid, loan) : # init vars config = Config() utils = Utils() result",
"currency, 'monthlyPymt' : monthly_payment } loan = { 'borrowerKey' : borrowerKey, 'lenderKey' :",
"-numPayments )) # create LoanInfo and Loan documents loanInfo = { 'principal' :",
"string borrowerKey @param string lenderKey @param string lenderName @param string lenderBusiness @return dict",
"nothing has been paid for doc in loan['payments'] : if doc['amountPaid'] == 0",
"update by replacement filt = { 'borrowerKey' : borrowerKey } result = self.collection.replace_one(filt,loan)",
"import gmtime, strftime now = strftime('%Y-%m-%d', gmtime()) doc['recvdate'] = now break # update",
"'effectiveRate' : effective_rate * 1000, 'currency' : currency, 'monthlyPymt' : monthly_payment } loan",
"data fields to Decimal @param string borrowerKey @return biglittle.entity.users.User instance \"\"\" def fetchLoanByBorrowerKey(self,",
"amtDue \"\"\" Retrieves loan for given borrower Converts all BSON Decimal128 financial data",
": # pick an annual rate at random import random annualRate = random.randint(1000,20000)",
"borrowerKey @return Decimal amtDue \"\"\" def fetchAmtDueForBorrower(self, borrowerKey) : amtDue = Decimal(0.00) loan",
"CursorType from decimal import Decimal from bson.decimal128 import Decimal128 from utils.utils import Utils",
"result = self.collection.replace_one(filt,loan) # send out pub/sub notifications if result : # convert",
"loan['payments'] : if doc['amountPaid'] == 0 : # if underpayment, add to \"overpayment\"",
"# convert amount paid to decimal.Decimal for processing purposes if not isinstance(amtPaid, Decimal)",
"overpayment = amtPaid - amtDue # apply payment doc['amountPaid'] = doc['amountDue'] doc['status'] =",
"\"\"\" Generates a series of simulated proposals @param float principal @param int numPayments",
"# find first payment where nothing has been paid for doc in loan['payments']",
"= loan['borrowerKey'] + '_' + loan['lenderKey'] + '_' + date return loan \"\"\"",
"loan.getLoanInfo() amtDue = loanInfo.getMonthlyPayment() overpayment = 0.00 # convert amount paid to decimal.Decimal",
"and monthly payment effective_rate = annualRate / 100 / 12; monthly_payment = principal",
"lenderKey @param string lenderName @param string lenderBusiness @return dict \"\"\" def generateProposal(self, principal,",
"Utils from biglittle.domain.base import Base from biglittle.entity.loan import Payment, LoanInfo, Loan class LoanService(Base)",
"= self.collection.find_one({\"borrowerKey\":borrowerKey}) if loan : loanInfo = loan.getLoanInfo() amtDue = loanInfo.get('monthlyPymt').to_decimal() return amtDue",
"amtPaid else : overpayment = amtPaid - amtDue # apply payment doc['amountPaid'] =",
")) # create LoanInfo and Loan documents loanInfo = { 'principal' : principal,",
"= principal * ( effective_rate / (1 - (1 + effective_rate) ** -numPayments",
"Retrieves loan for given borrower Converts all BSON Decimal128 financial data fields to",
"False loanInfo = loan.getLoanInfo() amtDue = loanInfo.getMonthlyPayment() overpayment = 0.00 # convert amount",
"/ (1 - (1 + effective_rate) ** -numPayments )) # create LoanInfo and",
"loan['loanKey'] = loan['borrowerKey'] + '_' + loan['lenderKey'] + '_' + date return loan",
"effective rate and monthly payment effective_rate = annualRate / 100 / 12; monthly_payment",
"'borrowerKey' : borrowerKey } result = self.collection.replace_one(filt,loan) # send out pub/sub notifications if",
"\"\"\" def save(self, loan) : # generate loanKey loan = self.generateLoanKey(loan) # convert",
"- (1 + J)**-N)) M == monthly payment P == principal J ==",
"key @param biglittle.entity.loan.Loan loan @return string loanKey \"\"\" def generateLoanKey(self, loan) : from",
"now = strftime('%Y-%m-%d', gmtime()) doc['recvdate'] = now break # update overpayment field currentOver",
"= Decimal(amtPaid) # find first payment where nothing has been paid for doc",
"= doc['amountDue'] doc['status'] = 'received' # save var today from time import gmtime,",
"= strftime('%Y-%m-%d', gmtime()) doc['recvdate'] = now break # update overpayment field currentOver =",
"== total number of payments @param float principal @param int numPayments @param float",
"BSON Decimal128 financial data fields to Decimal @param string borrowerKey @return biglittle.entity.users.User instance",
"Decimal amtDue \"\"\" def fetchAmtDueForBorrower(self, borrowerKey) : amtDue = Decimal(0.00) loan = self.collection.find_one({\"borrowerKey\":borrowerKey})",
"return loan \"\"\" Generates a series of simulated proposals @param float principal @param"
] |
[
"os import environ dot_env_path = normpath(join(dirname(__file__), '../', '.env')) load_dotenv(dot_env_path) def simplest(auth): mail =",
"<reponame>swimlane/python-office365<filename>examples/retrieve_folders.py from office365api import Mail from dotenv import load_dotenv from os.path import join,",
"for f in sf: print(\" {id} {name}\".format(id=f.Id, name=f.DisplayName)) if __name__ == '__main__': authorization",
"in sf: print(\" {id} {name}\".format(id=f.Id, name=f.DisplayName)) if __name__ == '__main__': authorization = (environ.get('OFFICE_USER'),",
"= mail.folders.get_count() print('Folder count {0}'.format(c)) m = mail.folders.get_all_folders() print('Folder names.') for folder in",
"from office365api import Mail from dotenv import load_dotenv from os.path import join, dirname,",
"Mail(auth=auth) c = mail.folders.get_count() print('Folder count {0}'.format(c)) m = mail.folders.get_all_folders() print('Folder names.') for",
"dirname, normpath from os import environ dot_env_path = normpath(join(dirname(__file__), '../', '.env')) load_dotenv(dot_env_path) def",
"for folder in (f for f in m if f.ChildFolderCount > 0): f_info",
"'.env')) load_dotenv(dot_env_path) def simplest(auth): mail = Mail(auth=auth) c = mail.folders.get_count() print('Folder count {0}'.format(c))",
"m = mail.folders.get_all_folders() print('Folder names.') for folder in m: print(\" {id} {name}\".format(id=folder.Id, name=folder.DisplayName))",
"sf: print(\" {id} {name}\".format(id=f.Id, name=f.DisplayName)) if __name__ == '__main__': authorization = (environ.get('OFFICE_USER'), environ.get('OFFICE_USER_PASSWORD'))",
"= normpath(join(dirname(__file__), '../', '.env')) load_dotenv(dot_env_path) def simplest(auth): mail = Mail(auth=auth) c = mail.folders.get_count()",
"dotenv import load_dotenv from os.path import join, dirname, normpath from os import environ",
"in m: print(\" {id} {name}\".format(id=folder.Id, name=folder.DisplayName)) for folder in (f for f in",
"0): f_info = mail.folders.get_folder(folder_id=folder.Id) print('Subfolders of {name}'.format(name=folder.DisplayName)) sf = mail.folders.get_sub_folders(folder.Id) for f in",
"import environ dot_env_path = normpath(join(dirname(__file__), '../', '.env')) load_dotenv(dot_env_path) def simplest(auth): mail = Mail(auth=auth)",
"dot_env_path = normpath(join(dirname(__file__), '../', '.env')) load_dotenv(dot_env_path) def simplest(auth): mail = Mail(auth=auth) c =",
"load_dotenv(dot_env_path) def simplest(auth): mail = Mail(auth=auth) c = mail.folders.get_count() print('Folder count {0}'.format(c)) m",
"def simplest(auth): mail = Mail(auth=auth) c = mail.folders.get_count() print('Folder count {0}'.format(c)) m =",
"{id} {name}\".format(id=folder.Id, name=folder.DisplayName)) for folder in (f for f in m if f.ChildFolderCount",
"c = mail.folders.get_count() print('Folder count {0}'.format(c)) m = mail.folders.get_all_folders() print('Folder names.') for folder",
"name=folder.DisplayName)) for folder in (f for f in m if f.ChildFolderCount > 0):",
"{0}'.format(c)) m = mail.folders.get_all_folders() print('Folder names.') for folder in m: print(\" {id} {name}\".format(id=folder.Id,",
"f.ChildFolderCount > 0): f_info = mail.folders.get_folder(folder_id=folder.Id) print('Subfolders of {name}'.format(name=folder.DisplayName)) sf = mail.folders.get_sub_folders(folder.Id) for",
"f_info = mail.folders.get_folder(folder_id=folder.Id) print('Subfolders of {name}'.format(name=folder.DisplayName)) sf = mail.folders.get_sub_folders(folder.Id) for f in sf:",
"of {name}'.format(name=folder.DisplayName)) sf = mail.folders.get_sub_folders(folder.Id) for f in sf: print(\" {id} {name}\".format(id=f.Id, name=f.DisplayName))",
"= mail.folders.get_sub_folders(folder.Id) for f in sf: print(\" {id} {name}\".format(id=f.Id, name=f.DisplayName)) if __name__ ==",
"simplest(auth): mail = Mail(auth=auth) c = mail.folders.get_count() print('Folder count {0}'.format(c)) m = mail.folders.get_all_folders()",
"= Mail(auth=auth) c = mail.folders.get_count() print('Folder count {0}'.format(c)) m = mail.folders.get_all_folders() print('Folder names.')",
"print('Folder names.') for folder in m: print(\" {id} {name}\".format(id=folder.Id, name=folder.DisplayName)) for folder in",
"m: print(\" {id} {name}\".format(id=folder.Id, name=folder.DisplayName)) for folder in (f for f in m",
"{name}'.format(name=folder.DisplayName)) sf = mail.folders.get_sub_folders(folder.Id) for f in sf: print(\" {id} {name}\".format(id=f.Id, name=f.DisplayName)) if",
"f in m if f.ChildFolderCount > 0): f_info = mail.folders.get_folder(folder_id=folder.Id) print('Subfolders of {name}'.format(name=folder.DisplayName))",
"if f.ChildFolderCount > 0): f_info = mail.folders.get_folder(folder_id=folder.Id) print('Subfolders of {name}'.format(name=folder.DisplayName)) sf = mail.folders.get_sub_folders(folder.Id)",
"os.path import join, dirname, normpath from os import environ dot_env_path = normpath(join(dirname(__file__), '../',",
"office365api import Mail from dotenv import load_dotenv from os.path import join, dirname, normpath",
"mail = Mail(auth=auth) c = mail.folders.get_count() print('Folder count {0}'.format(c)) m = mail.folders.get_all_folders() print('Folder",
"mail.folders.get_count() print('Folder count {0}'.format(c)) m = mail.folders.get_all_folders() print('Folder names.') for folder in m:",
"folder in (f for f in m if f.ChildFolderCount > 0): f_info =",
"= mail.folders.get_all_folders() print('Folder names.') for folder in m: print(\" {id} {name}\".format(id=folder.Id, name=folder.DisplayName)) for",
"folder in m: print(\" {id} {name}\".format(id=folder.Id, name=folder.DisplayName)) for folder in (f for f",
"from os import environ dot_env_path = normpath(join(dirname(__file__), '../', '.env')) load_dotenv(dot_env_path) def simplest(auth): mail",
"load_dotenv from os.path import join, dirname, normpath from os import environ dot_env_path =",
"import Mail from dotenv import load_dotenv from os.path import join, dirname, normpath from",
"from dotenv import load_dotenv from os.path import join, dirname, normpath from os import",
"count {0}'.format(c)) m = mail.folders.get_all_folders() print('Folder names.') for folder in m: print(\" {id}",
"join, dirname, normpath from os import environ dot_env_path = normpath(join(dirname(__file__), '../', '.env')) load_dotenv(dot_env_path)",
"mail.folders.get_folder(folder_id=folder.Id) print('Subfolders of {name}'.format(name=folder.DisplayName)) sf = mail.folders.get_sub_folders(folder.Id) for f in sf: print(\" {id}",
"from os.path import join, dirname, normpath from os import environ dot_env_path = normpath(join(dirname(__file__),",
"Mail from dotenv import load_dotenv from os.path import join, dirname, normpath from os",
"sf = mail.folders.get_sub_folders(folder.Id) for f in sf: print(\" {id} {name}\".format(id=f.Id, name=f.DisplayName)) if __name__",
"in (f for f in m if f.ChildFolderCount > 0): f_info = mail.folders.get_folder(folder_id=folder.Id)",
"m if f.ChildFolderCount > 0): f_info = mail.folders.get_folder(folder_id=folder.Id) print('Subfolders of {name}'.format(name=folder.DisplayName)) sf =",
"for folder in m: print(\" {id} {name}\".format(id=folder.Id, name=folder.DisplayName)) for folder in (f for",
"print('Subfolders of {name}'.format(name=folder.DisplayName)) sf = mail.folders.get_sub_folders(folder.Id) for f in sf: print(\" {id} {name}\".format(id=f.Id,",
"mail.folders.get_sub_folders(folder.Id) for f in sf: print(\" {id} {name}\".format(id=f.Id, name=f.DisplayName)) if __name__ == '__main__':",
"print('Folder count {0}'.format(c)) m = mail.folders.get_all_folders() print('Folder names.') for folder in m: print(\"",
"print(\" {id} {name}\".format(id=f.Id, name=f.DisplayName)) if __name__ == '__main__': authorization = (environ.get('OFFICE_USER'), environ.get('OFFICE_USER_PASSWORD')) simplest(authorization)",
"for f in m if f.ChildFolderCount > 0): f_info = mail.folders.get_folder(folder_id=folder.Id) print('Subfolders of",
"= mail.folders.get_folder(folder_id=folder.Id) print('Subfolders of {name}'.format(name=folder.DisplayName)) sf = mail.folders.get_sub_folders(folder.Id) for f in sf: print(\"",
"names.') for folder in m: print(\" {id} {name}\".format(id=folder.Id, name=folder.DisplayName)) for folder in (f",
"f in sf: print(\" {id} {name}\".format(id=f.Id, name=f.DisplayName)) if __name__ == '__main__': authorization =",
"> 0): f_info = mail.folders.get_folder(folder_id=folder.Id) print('Subfolders of {name}'.format(name=folder.DisplayName)) sf = mail.folders.get_sub_folders(folder.Id) for f",
"import load_dotenv from os.path import join, dirname, normpath from os import environ dot_env_path",
"normpath from os import environ dot_env_path = normpath(join(dirname(__file__), '../', '.env')) load_dotenv(dot_env_path) def simplest(auth):",
"(f for f in m if f.ChildFolderCount > 0): f_info = mail.folders.get_folder(folder_id=folder.Id) print('Subfolders",
"mail.folders.get_all_folders() print('Folder names.') for folder in m: print(\" {id} {name}\".format(id=folder.Id, name=folder.DisplayName)) for folder",
"import join, dirname, normpath from os import environ dot_env_path = normpath(join(dirname(__file__), '../', '.env'))",
"normpath(join(dirname(__file__), '../', '.env')) load_dotenv(dot_env_path) def simplest(auth): mail = Mail(auth=auth) c = mail.folders.get_count() print('Folder",
"'../', '.env')) load_dotenv(dot_env_path) def simplest(auth): mail = Mail(auth=auth) c = mail.folders.get_count() print('Folder count",
"in m if f.ChildFolderCount > 0): f_info = mail.folders.get_folder(folder_id=folder.Id) print('Subfolders of {name}'.format(name=folder.DisplayName)) sf",
"{name}\".format(id=folder.Id, name=folder.DisplayName)) for folder in (f for f in m if f.ChildFolderCount >",
"environ dot_env_path = normpath(join(dirname(__file__), '../', '.env')) load_dotenv(dot_env_path) def simplest(auth): mail = Mail(auth=auth) c",
"print(\" {id} {name}\".format(id=folder.Id, name=folder.DisplayName)) for folder in (f for f in m if"
] |
[
"program that trains the model for identifying a known set of document classes",
"trains the model for identifying a known set of document classes Author:: <NAME>",
"the model for identifying a known set of document classes Author:: <NAME> Date::",
"model for identifying a known set of document classes Author:: <NAME> Date:: 04/02/2019",
"Identifier Brief:: Implementation of the program that trains the model for identifying a",
"of the program that trains the model for identifying a known set of",
"that trains the model for identifying a known set of document classes Author::",
"the program that trains the model for identifying a known set of document",
"Brief:: Implementation of the program that trains the model for identifying a known",
"for identifying a known set of document classes Author:: <NAME> Date:: 04/02/2019 \"\"\"",
"\"\"\" Title:: File Structure Identifier Brief:: Implementation of the program that trains the",
"Structure Identifier Brief:: Implementation of the program that trains the model for identifying",
"File Structure Identifier Brief:: Implementation of the program that trains the model for",
"Title:: File Structure Identifier Brief:: Implementation of the program that trains the model",
"Implementation of the program that trains the model for identifying a known set"
] |
[
"import SMTP_SSL import requests import config class Notice: def __init__(self, admin: dict, account:",
"str): url = 'https://www.pushplus.plus/send' headers = {\"Content-Type\": \"application/json\"} data = json.dumps({ \"token\": self.account.get(\"pushToken\"),",
"# 收件人邮箱 receiver = self.account.get(\"mail\") # 邮件标题 mail_title = \"易班 \" + time.strftime(\"%Y-%m-%d\",",
"time.localtime(int(time.time()))) + \" 签到情况\" # ssl登录 smtp = SMTP_SSL(host_server) smtp.ehlo(host_server) smtp.login(sendMail, authCode) msg",
"= SMTP_SSL(host_server) smtp.ehlo(host_server) smtp.login(sendMail, authCode) msg = MIMEText(message, \"html\", 'utf-8') msg[\"Subject\"] = Header(mail_title,",
"== \"\" or self.account.get(\"notice\") == \"local\": return Notice.saveLocal(content) elif self.account.get(\"notice\") == \"mail\": if",
"\"\": print(\"未设置发送者邮箱信息,转为本地记录\") Notice.saveLocal(content) else: self.send_mail(content) else: self.sendPushPlus(content) print(content) def send_mail(self, message: str): try:",
"邮件标题 mail_title = \"易班 \" + time.strftime(\"%Y-%m-%d\", time.localtime(int(time.time()))) + \" 签到情况\" # ssl登录",
"if response['code'] == 200: return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送成功!\\n\") else: print(\"发送失败,转为本地记录\") Notice.saveLocal(content) return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送失败!原因:",
"= requests.post(url=url, data=data, headers=headers).json() if response['code'] == 200: return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送成功!\\n\") else: print(\"发送失败,转为本地记录\")",
"import config class Notice: def __init__(self, admin: dict, account: dict): self.admin = admin,",
"True except Exception as e: print(e) return False # 发送pushPlus def sendPushPlus(self, content:",
"\"token\": self.account.get(\"pushToken\"), \"title\": \"易班签到通知\", \"content\": content, \"template\": \"txt\" }) response = requests.post(url=url, data=data,",
"headers=headers).json() if response['code'] == 200: return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送成功!\\n\") else: print(\"发送失败,转为本地记录\") Notice.saveLocal(content) return Notice.log(f\"{self.account.get('mobile')}\\tPush",
"# 邮件标题 mail_title = \"易班 \" + time.strftime(\"%Y-%m-%d\", time.localtime(int(time.time()))) + \" 签到情况\" #",
"receiver, msg.as_string()) smtp.quit() return True except Exception as e: print(e) return False #",
"authCode) msg = MIMEText(message, \"html\", 'utf-8') msg[\"Subject\"] = Header(mail_title, 'utf-8') msg[\"From\"] = sendMail",
"import requests import config class Notice: def __init__(self, admin: dict, account: dict): self.admin",
"self.account.get(\"notice\") == \"local\": return Notice.saveLocal(content) elif self.account.get(\"notice\") == \"mail\": if self.admin[0][\"mail\"][\"sendMail\"] == \"\"",
"邮箱的授权码 authCode = self.admin[0][\"mail\"][\"authCode\"] # 收件人邮箱 receiver = self.account.get(\"mail\") # 邮件标题 mail_title =",
"return True except Exception as e: print(e) return False # 发送pushPlus def sendPushPlus(self,",
"except Exception as e: print(e) return False # 发送pushPlus def sendPushPlus(self, content: str):",
"self.admin[0][\"mail\"][\"sendMail\"] == \"\" and self.admin[0][\"mail\"][\"authCode\"] == \"\": print(\"未设置发送者邮箱信息,转为本地记录\") Notice.saveLocal(content) else: self.send_mail(content) else: self.sendPushPlus(content)",
"print(e) return False # 发送pushPlus def sendPushPlus(self, content: str): url = 'https://www.pushplus.plus/send' headers",
"sendMail = self.admin[0][\"mail\"][\"sendMail\"] # 邮箱的授权码 authCode = self.admin[0][\"mail\"][\"authCode\"] # 收件人邮箱 receiver = self.account.get(\"mail\")",
"\"易班签到通知\", \"content\": content, \"template\": \"txt\" }) response = requests.post(url=url, data=data, headers=headers).json() if response['code']",
"\"content\": content, \"template\": \"txt\" }) response = requests.post(url=url, data=data, headers=headers).json() if response['code'] ==",
"try: host_server = self.admin[0][\"mail\"][\"smtpServer\"] # 发件人的邮箱 sendMail = self.admin[0][\"mail\"][\"sendMail\"] # 邮箱的授权码 authCode =",
"send(self, content): if self.account.get(\"notice\") == \"\" or self.account.get(\"notice\") == \"local\": return Notice.saveLocal(content) elif",
"Header from smtplib import SMTP_SSL import requests import config class Notice: def __init__(self,",
"authCode = self.admin[0][\"mail\"][\"authCode\"] # 收件人邮箱 receiver = self.account.get(\"mail\") # 邮件标题 mail_title = \"易班",
"# 邮箱的授权码 authCode = self.admin[0][\"mail\"][\"authCode\"] # 收件人邮箱 receiver = self.account.get(\"mail\") # 邮件标题 mail_title",
"smtp.ehlo(host_server) smtp.login(sendMail, authCode) msg = MIMEText(message, \"html\", 'utf-8') msg[\"Subject\"] = Header(mail_title, 'utf-8') msg[\"From\"]",
"msg = MIMEText(message, \"html\", 'utf-8') msg[\"Subject\"] = Header(mail_title, 'utf-8') msg[\"From\"] = sendMail msg[\"To\"]",
"json.dumps({ \"token\": self.account.get(\"pushToken\"), \"title\": \"易班签到通知\", \"content\": content, \"template\": \"txt\" }) response = requests.post(url=url,",
"__init__(self, admin: dict, account: dict): self.admin = admin, self.account = account def send(self,",
"def log(message: str): with open(file=\"data/logs.log\", mode=\"a+\", encoding=\"utf-8\") as f: f.write(message) print(message) @staticmethod def",
"self.account = account def send(self, content): if self.account.get(\"notice\") == \"\" or self.account.get(\"notice\") ==",
"== \"\" and self.admin[0][\"mail\"][\"authCode\"] == \"\": print(\"未设置发送者邮箱信息,转为本地记录\") Notice.saveLocal(content) else: self.send_mail(content) else: self.sendPushPlus(content) print(content)",
"self.admin[0][\"mail\"][\"authCode\"] == \"\": print(\"未设置发送者邮箱信息,转为本地记录\") Notice.saveLocal(content) else: self.send_mail(content) else: self.sendPushPlus(content) print(content) def send_mail(self, message:",
"print(content) def send_mail(self, message: str): try: host_server = self.admin[0][\"mail\"][\"smtpServer\"] # 发件人的邮箱 sendMail =",
"smtp.login(sendMail, authCode) msg = MIMEText(message, \"html\", 'utf-8') msg[\"Subject\"] = Header(mail_title, 'utf-8') msg[\"From\"] =",
"self.account.get(\"pushToken\"), \"title\": \"易班签到通知\", \"content\": content, \"template\": \"txt\" }) response = requests.post(url=url, data=data, headers=headers).json()",
"签到情况\" # ssl登录 smtp = SMTP_SSL(host_server) smtp.ehlo(host_server) smtp.login(sendMail, authCode) msg = MIMEText(message, \"html\",",
"admin, self.account = account def send(self, content): if self.account.get(\"notice\") == \"\" or self.account.get(\"notice\")",
"else: self.send_mail(content) else: self.sendPushPlus(content) print(content) def send_mail(self, message: str): try: host_server = self.admin[0][\"mail\"][\"smtpServer\"]",
"mode=\"a+\", encoding=\"utf-8\") as f: f.write(message) print(message) @staticmethod def saveLocal(message): with open(\"data/result.log\", mode=\"a+\", encoding=\"utf-8\")",
"Exception as e: print(e) return False # 发送pushPlus def sendPushPlus(self, content: str): url",
"mail_title = \"易班 \" + time.strftime(\"%Y-%m-%d\", time.localtime(int(time.time()))) + \" 签到情况\" # ssl登录 smtp",
"message: str): try: host_server = self.admin[0][\"mail\"][\"smtpServer\"] # 发件人的邮箱 sendMail = self.admin[0][\"mail\"][\"sendMail\"] # 邮箱的授权码",
"\" + time.strftime(\"%Y-%m-%d\", time.localtime(int(time.time()))) + \" 签到情况\" # ssl登录 smtp = SMTP_SSL(host_server) smtp.ehlo(host_server)",
"and self.admin[0][\"mail\"][\"authCode\"] == \"\": print(\"未设置发送者邮箱信息,转为本地记录\") Notice.saveLocal(content) else: self.send_mail(content) else: self.sendPushPlus(content) print(content) def send_mail(self,",
"url = 'https://www.pushplus.plus/send' headers = {\"Content-Type\": \"application/json\"} data = json.dumps({ \"token\": self.account.get(\"pushToken\"), \"title\":",
"log(message: str): with open(file=\"data/logs.log\", mode=\"a+\", encoding=\"utf-8\") as f: f.write(message) print(message) @staticmethod def saveLocal(message):",
"def sendPushPlus(self, content: str): url = 'https://www.pushplus.plus/send' headers = {\"Content-Type\": \"application/json\"} data =",
"import time from email.mime.text import MIMEText from email.header import Header from smtplib import",
"time from email.mime.text import MIMEText from email.header import Header from smtplib import SMTP_SSL",
"+ time.strftime(\"%Y-%m-%d\", time.localtime(int(time.time()))) + \" 签到情况\" # ssl登录 smtp = SMTP_SSL(host_server) smtp.ehlo(host_server) smtp.login(sendMail,",
"self.admin[0][\"mail\"][\"smtpServer\"] # 发件人的邮箱 sendMail = self.admin[0][\"mail\"][\"sendMail\"] # 邮箱的授权码 authCode = self.admin[0][\"mail\"][\"authCode\"] # 收件人邮箱",
"def __init__(self, admin: dict, account: dict): self.admin = admin, self.account = account def",
"self.sendPushPlus(content) print(content) def send_mail(self, message: str): try: host_server = self.admin[0][\"mail\"][\"smtpServer\"] # 发件人的邮箱 sendMail",
"@staticmethod def log(message: str): with open(file=\"data/logs.log\", mode=\"a+\", encoding=\"utf-8\") as f: f.write(message) print(message) @staticmethod",
"dict, account: dict): self.admin = admin, self.account = account def send(self, content): if",
"\"local\": return Notice.saveLocal(content) elif self.account.get(\"notice\") == \"mail\": if self.admin[0][\"mail\"][\"sendMail\"] == \"\" and self.admin[0][\"mail\"][\"authCode\"]",
"= \"易班 \" + time.strftime(\"%Y-%m-%d\", time.localtime(int(time.time()))) + \" 签到情况\" # ssl登录 smtp =",
"\"title\": \"易班签到通知\", \"content\": content, \"template\": \"txt\" }) response = requests.post(url=url, data=data, headers=headers).json() if",
"receiver = self.account.get(\"mail\") # 邮件标题 mail_title = \"易班 \" + time.strftime(\"%Y-%m-%d\", time.localtime(int(time.time()))) +",
"+ \" 签到情况\" # ssl登录 smtp = SMTP_SSL(host_server) smtp.ehlo(host_server) smtp.login(sendMail, authCode) msg =",
"= self.admin[0][\"mail\"][\"smtpServer\"] # 发件人的邮箱 sendMail = self.admin[0][\"mail\"][\"sendMail\"] # 邮箱的授权码 authCode = self.admin[0][\"mail\"][\"authCode\"] #",
"'https://www.pushplus.plus/send' headers = {\"Content-Type\": \"application/json\"} data = json.dumps({ \"token\": self.account.get(\"pushToken\"), \"title\": \"易班签到通知\", \"content\":",
"13:00 @Auth : apecode @File :notice.py @IDE :PyCharm @Blog:https://liiuyangxiong.cn \"\"\" import json import",
"time.strftime(\"%Y-%m-%d\", time.localtime(int(time.time()))) + \" 签到情况\" # ssl登录 smtp = SMTP_SSL(host_server) smtp.ehlo(host_server) smtp.login(sendMail, authCode)",
"admin: dict, account: dict): self.admin = admin, self.account = account def send(self, content):",
"smtp = SMTP_SSL(host_server) smtp.ehlo(host_server) smtp.login(sendMail, authCode) msg = MIMEText(message, \"html\", 'utf-8') msg[\"Subject\"] =",
"content: str): url = 'https://www.pushplus.plus/send' headers = {\"Content-Type\": \"application/json\"} data = json.dumps({ \"token\":",
"# 发送pushPlus def sendPushPlus(self, content: str): url = 'https://www.pushplus.plus/send' headers = {\"Content-Type\": \"application/json\"}",
"Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送失败!原因: {response['msg']}\\n\") @staticmethod def log(message: str): with open(file=\"data/logs.log\", mode=\"a+\", encoding=\"utf-8\") as f:",
"-*- \"\"\" @Time : 2021/8/24 13:00 @Auth : apecode @File :notice.py @IDE :PyCharm",
"# 发件人的邮箱 sendMail = self.admin[0][\"mail\"][\"sendMail\"] # 邮箱的授权码 authCode = self.admin[0][\"mail\"][\"authCode\"] # 收件人邮箱 receiver",
"Plus发送失败!原因: {response['msg']}\\n\") @staticmethod def log(message: str): with open(file=\"data/logs.log\", mode=\"a+\", encoding=\"utf-8\") as f: f.write(message)",
"encoding=\"utf-8\") as f: f.write(message) print(message) @staticmethod def saveLocal(message): with open(\"data/result.log\", mode=\"a+\", encoding=\"utf-8\") as",
"self.send_mail(content) else: self.sendPushPlus(content) print(content) def send_mail(self, message: str): try: host_server = self.admin[0][\"mail\"][\"smtpServer\"] #",
"= json.dumps({ \"token\": self.account.get(\"pushToken\"), \"title\": \"易班签到通知\", \"content\": content, \"template\": \"txt\" }) response =",
"as e: print(e) return False # 发送pushPlus def sendPushPlus(self, content: str): url =",
"data=data, headers=headers).json() if response['code'] == 200: return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送成功!\\n\") else: print(\"发送失败,转为本地记录\") Notice.saveLocal(content) return",
"Header(mail_title, 'utf-8') msg[\"From\"] = sendMail msg[\"To\"] = receiver smtp.sendmail(sendMail, receiver, msg.as_string()) smtp.quit() return",
"Notice.saveLocal(content) else: self.send_mail(content) else: self.sendPushPlus(content) print(content) def send_mail(self, message: str): try: host_server =",
"= self.admin[0][\"mail\"][\"authCode\"] # 收件人邮箱 receiver = self.account.get(\"mail\") # 邮件标题 mail_title = \"易班 \"",
":notice.py @IDE :PyCharm @Blog:https://liiuyangxiong.cn \"\"\" import json import time from email.mime.text import MIMEText",
"str): with open(file=\"data/logs.log\", mode=\"a+\", encoding=\"utf-8\") as f: f.write(message) print(message) @staticmethod def saveLocal(message): with",
"Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送成功!\\n\") else: print(\"发送失败,转为本地记录\") Notice.saveLocal(content) return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送失败!原因: {response['msg']}\\n\") @staticmethod def log(message: str):",
"= self.account.get(\"mail\") # 邮件标题 mail_title = \"易班 \" + time.strftime(\"%Y-%m-%d\", time.localtime(int(time.time()))) + \"",
"Notice.saveLocal(content) return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送失败!原因: {response['msg']}\\n\") @staticmethod def log(message: str): with open(file=\"data/logs.log\", mode=\"a+\", encoding=\"utf-8\")",
"= admin, self.account = account def send(self, content): if self.account.get(\"notice\") == \"\" or",
"response = requests.post(url=url, data=data, headers=headers).json() if response['code'] == 200: return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送成功!\\n\") else:",
": apecode @File :notice.py @IDE :PyCharm @Blog:https://liiuyangxiong.cn \"\"\" import json import time from",
"2021/8/24 13:00 @Auth : apecode @File :notice.py @IDE :PyCharm @Blog:https://liiuyangxiong.cn \"\"\" import json",
"@Auth : apecode @File :notice.py @IDE :PyCharm @Blog:https://liiuyangxiong.cn \"\"\" import json import time",
"@Blog:https://liiuyangxiong.cn \"\"\" import json import time from email.mime.text import MIMEText from email.header import",
"\"易班 \" + time.strftime(\"%Y-%m-%d\", time.localtime(int(time.time()))) + \" 签到情况\" # ssl登录 smtp = SMTP_SSL(host_server)",
"from smtplib import SMTP_SSL import requests import config class Notice: def __init__(self, admin:",
"Plus发送成功!\\n\") else: print(\"发送失败,转为本地记录\") Notice.saveLocal(content) return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送失败!原因: {response['msg']}\\n\") @staticmethod def log(message: str): with",
"\"\" and self.admin[0][\"mail\"][\"authCode\"] == \"\": print(\"未设置发送者邮箱信息,转为本地记录\") Notice.saveLocal(content) else: self.send_mail(content) else: self.sendPushPlus(content) print(content) def",
"self.admin = admin, self.account = account def send(self, content): if self.account.get(\"notice\") == \"\"",
"import json import time from email.mime.text import MIMEText from email.header import Header from",
"self.account.get(\"notice\") == \"\" or self.account.get(\"notice\") == \"local\": return Notice.saveLocal(content) elif self.account.get(\"notice\") == \"mail\":",
"class Notice: def __init__(self, admin: dict, account: dict): self.admin = admin, self.account =",
"== \"local\": return Notice.saveLocal(content) elif self.account.get(\"notice\") == \"mail\": if self.admin[0][\"mail\"][\"sendMail\"] == \"\" and",
"\"\"\" @Time : 2021/8/24 13:00 @Auth : apecode @File :notice.py @IDE :PyCharm @Blog:https://liiuyangxiong.cn",
"account def send(self, content): if self.account.get(\"notice\") == \"\" or self.account.get(\"notice\") == \"local\": return",
"False # 发送pushPlus def sendPushPlus(self, content: str): url = 'https://www.pushplus.plus/send' headers = {\"Content-Type\":",
"else: print(\"发送失败,转为本地记录\") Notice.saveLocal(content) return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送失败!原因: {response['msg']}\\n\") @staticmethod def log(message: str): with open(file=\"data/logs.log\",",
"else: self.sendPushPlus(content) print(content) def send_mail(self, message: str): try: host_server = self.admin[0][\"mail\"][\"smtpServer\"] # 发件人的邮箱",
"print(\"未设置发送者邮箱信息,转为本地记录\") Notice.saveLocal(content) else: self.send_mail(content) else: self.sendPushPlus(content) print(content) def send_mail(self, message: str): try: host_server",
"== \"\": print(\"未设置发送者邮箱信息,转为本地记录\") Notice.saveLocal(content) else: self.send_mail(content) else: self.sendPushPlus(content) print(content) def send_mail(self, message: str):",
"dict): self.admin = admin, self.account = account def send(self, content): if self.account.get(\"notice\") ==",
"\"mail\": if self.admin[0][\"mail\"][\"sendMail\"] == \"\" and self.admin[0][\"mail\"][\"authCode\"] == \"\": print(\"未设置发送者邮箱信息,转为本地记录\") Notice.saveLocal(content) else: self.send_mail(content)",
"MIMEText from email.header import Header from smtplib import SMTP_SSL import requests import config",
"发送pushPlus def sendPushPlus(self, content: str): url = 'https://www.pushplus.plus/send' headers = {\"Content-Type\": \"application/json\"} data",
"e: print(e) return False # 发送pushPlus def sendPushPlus(self, content: str): url = 'https://www.pushplus.plus/send'",
"@IDE :PyCharm @Blog:https://liiuyangxiong.cn \"\"\" import json import time from email.mime.text import MIMEText from",
"self.admin[0][\"mail\"][\"sendMail\"] # 邮箱的授权码 authCode = self.admin[0][\"mail\"][\"authCode\"] # 收件人邮箱 receiver = self.account.get(\"mail\") # 邮件标题",
"import Header from smtplib import SMTP_SSL import requests import config class Notice: def",
"from email.mime.text import MIMEText from email.header import Header from smtplib import SMTP_SSL import",
"with open(file=\"data/logs.log\", mode=\"a+\", encoding=\"utf-8\") as f: f.write(message) print(message) @staticmethod def saveLocal(message): with open(\"data/result.log\",",
"@File :notice.py @IDE :PyCharm @Blog:https://liiuyangxiong.cn \"\"\" import json import time from email.mime.text import",
"= Header(mail_title, 'utf-8') msg[\"From\"] = sendMail msg[\"To\"] = receiver smtp.sendmail(sendMail, receiver, msg.as_string()) smtp.quit()",
"self.admin[0][\"mail\"][\"authCode\"] # 收件人邮箱 receiver = self.account.get(\"mail\") # 邮件标题 mail_title = \"易班 \" +",
"smtp.sendmail(sendMail, receiver, msg.as_string()) smtp.quit() return True except Exception as e: print(e) return False",
": 2021/8/24 13:00 @Auth : apecode @File :notice.py @IDE :PyCharm @Blog:https://liiuyangxiong.cn \"\"\" import",
"import MIMEText from email.header import Header from smtplib import SMTP_SSL import requests import",
"发件人的邮箱 sendMail = self.admin[0][\"mail\"][\"sendMail\"] # 邮箱的授权码 authCode = self.admin[0][\"mail\"][\"authCode\"] # 收件人邮箱 receiver =",
"= 'https://www.pushplus.plus/send' headers = {\"Content-Type\": \"application/json\"} data = json.dumps({ \"token\": self.account.get(\"pushToken\"), \"title\": \"易班签到通知\",",
"= self.admin[0][\"mail\"][\"sendMail\"] # 邮箱的授权码 authCode = self.admin[0][\"mail\"][\"authCode\"] # 收件人邮箱 receiver = self.account.get(\"mail\") #",
"if self.account.get(\"notice\") == \"\" or self.account.get(\"notice\") == \"local\": return Notice.saveLocal(content) elif self.account.get(\"notice\") ==",
"sendMail msg[\"To\"] = receiver smtp.sendmail(sendMail, receiver, msg.as_string()) smtp.quit() return True except Exception as",
"f: f.write(message) print(message) @staticmethod def saveLocal(message): with open(\"data/result.log\", mode=\"a+\", encoding=\"utf-8\") as w: w.write(message)",
"\"\"\" import json import time from email.mime.text import MIMEText from email.header import Header",
"def send(self, content): if self.account.get(\"notice\") == \"\" or self.account.get(\"notice\") == \"local\": return Notice.saveLocal(content)",
"def send_mail(self, message: str): try: host_server = self.admin[0][\"mail\"][\"smtpServer\"] # 发件人的邮箱 sendMail = self.admin[0][\"mail\"][\"sendMail\"]",
"data = json.dumps({ \"token\": self.account.get(\"pushToken\"), \"title\": \"易班签到通知\", \"content\": content, \"template\": \"txt\" }) response",
"utf-8 -*- \"\"\" @Time : 2021/8/24 13:00 @Auth : apecode @File :notice.py @IDE",
"\"html\", 'utf-8') msg[\"Subject\"] = Header(mail_title, 'utf-8') msg[\"From\"] = sendMail msg[\"To\"] = receiver smtp.sendmail(sendMail,",
"\"template\": \"txt\" }) response = requests.post(url=url, data=data, headers=headers).json() if response['code'] == 200: return",
"}) response = requests.post(url=url, data=data, headers=headers).json() if response['code'] == 200: return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送成功!\\n\")",
"Notice: def __init__(self, admin: dict, account: dict): self.admin = admin, self.account = account",
"return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送失败!原因: {response['msg']}\\n\") @staticmethod def log(message: str): with open(file=\"data/logs.log\", mode=\"a+\", encoding=\"utf-8\") as",
"SMTP_SSL(host_server) smtp.ehlo(host_server) smtp.login(sendMail, authCode) msg = MIMEText(message, \"html\", 'utf-8') msg[\"Subject\"] = Header(mail_title, 'utf-8')",
"\"\" or self.account.get(\"notice\") == \"local\": return Notice.saveLocal(content) elif self.account.get(\"notice\") == \"mail\": if self.admin[0][\"mail\"][\"sendMail\"]",
"@Time : 2021/8/24 13:00 @Auth : apecode @File :notice.py @IDE :PyCharm @Blog:https://liiuyangxiong.cn \"\"\"",
"str): try: host_server = self.admin[0][\"mail\"][\"smtpServer\"] # 发件人的邮箱 sendMail = self.admin[0][\"mail\"][\"sendMail\"] # 邮箱的授权码 authCode",
"smtp.quit() return True except Exception as e: print(e) return False # 发送pushPlus def",
"open(file=\"data/logs.log\", mode=\"a+\", encoding=\"utf-8\") as f: f.write(message) print(message) @staticmethod def saveLocal(message): with open(\"data/result.log\", mode=\"a+\",",
"'utf-8') msg[\"From\"] = sendMail msg[\"To\"] = receiver smtp.sendmail(sendMail, receiver, msg.as_string()) smtp.quit() return True",
"send_mail(self, message: str): try: host_server = self.admin[0][\"mail\"][\"smtpServer\"] # 发件人的邮箱 sendMail = self.admin[0][\"mail\"][\"sendMail\"] #",
"elif self.account.get(\"notice\") == \"mail\": if self.admin[0][\"mail\"][\"sendMail\"] == \"\" and self.admin[0][\"mail\"][\"authCode\"] == \"\": print(\"未设置发送者邮箱信息,转为本地记录\")",
"SMTP_SSL import requests import config class Notice: def __init__(self, admin: dict, account: dict):",
"'utf-8') msg[\"Subject\"] = Header(mail_title, 'utf-8') msg[\"From\"] = sendMail msg[\"To\"] = receiver smtp.sendmail(sendMail, receiver,",
"sendPushPlus(self, content: str): url = 'https://www.pushplus.plus/send' headers = {\"Content-Type\": \"application/json\"} data = json.dumps({",
"email.header import Header from smtplib import SMTP_SSL import requests import config class Notice:",
"200: return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送成功!\\n\") else: print(\"发送失败,转为本地记录\") Notice.saveLocal(content) return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送失败!原因: {response['msg']}\\n\") @staticmethod def",
"= MIMEText(message, \"html\", 'utf-8') msg[\"Subject\"] = Header(mail_title, 'utf-8') msg[\"From\"] = sendMail msg[\"To\"] =",
"self.account.get(\"mail\") # 邮件标题 mail_title = \"易班 \" + time.strftime(\"%Y-%m-%d\", time.localtime(int(time.time()))) + \" 签到情况\"",
"or self.account.get(\"notice\") == \"local\": return Notice.saveLocal(content) elif self.account.get(\"notice\") == \"mail\": if self.admin[0][\"mail\"][\"sendMail\"] ==",
"smtplib import SMTP_SSL import requests import config class Notice: def __init__(self, admin: dict,",
"= {\"Content-Type\": \"application/json\"} data = json.dumps({ \"token\": self.account.get(\"pushToken\"), \"title\": \"易班签到通知\", \"content\": content, \"template\":",
"return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送成功!\\n\") else: print(\"发送失败,转为本地记录\") Notice.saveLocal(content) return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送失败!原因: {response['msg']}\\n\") @staticmethod def log(message:",
"# -*- coding: utf-8 -*- \"\"\" @Time : 2021/8/24 13:00 @Auth : apecode",
"== \"mail\": if self.admin[0][\"mail\"][\"sendMail\"] == \"\" and self.admin[0][\"mail\"][\"authCode\"] == \"\": print(\"未设置发送者邮箱信息,转为本地记录\") Notice.saveLocal(content) else:",
"\"application/json\"} data = json.dumps({ \"token\": self.account.get(\"pushToken\"), \"title\": \"易班签到通知\", \"content\": content, \"template\": \"txt\" })",
"content): if self.account.get(\"notice\") == \"\" or self.account.get(\"notice\") == \"local\": return Notice.saveLocal(content) elif self.account.get(\"notice\")",
"email.mime.text import MIMEText from email.header import Header from smtplib import SMTP_SSL import requests",
"{response['msg']}\\n\") @staticmethod def log(message: str): with open(file=\"data/logs.log\", mode=\"a+\", encoding=\"utf-8\") as f: f.write(message) print(message)",
"msg[\"From\"] = sendMail msg[\"To\"] = receiver smtp.sendmail(sendMail, receiver, msg.as_string()) smtp.quit() return True except",
"self.account.get(\"notice\") == \"mail\": if self.admin[0][\"mail\"][\"sendMail\"] == \"\" and self.admin[0][\"mail\"][\"authCode\"] == \"\": print(\"未设置发送者邮箱信息,转为本地记录\") Notice.saveLocal(content)",
"Notice.saveLocal(content) elif self.account.get(\"notice\") == \"mail\": if self.admin[0][\"mail\"][\"sendMail\"] == \"\" and self.admin[0][\"mail\"][\"authCode\"] == \"\":",
"if self.admin[0][\"mail\"][\"sendMail\"] == \"\" and self.admin[0][\"mail\"][\"authCode\"] == \"\": print(\"未设置发送者邮箱信息,转为本地记录\") Notice.saveLocal(content) else: self.send_mail(content) else:",
"return False # 发送pushPlus def sendPushPlus(self, content: str): url = 'https://www.pushplus.plus/send' headers =",
"from email.header import Header from smtplib import SMTP_SSL import requests import config class",
"headers = {\"Content-Type\": \"application/json\"} data = json.dumps({ \"token\": self.account.get(\"pushToken\"), \"title\": \"易班签到通知\", \"content\": content,",
"coding: utf-8 -*- \"\"\" @Time : 2021/8/24 13:00 @Auth : apecode @File :notice.py",
"requests import config class Notice: def __init__(self, admin: dict, account: dict): self.admin =",
"= sendMail msg[\"To\"] = receiver smtp.sendmail(sendMail, receiver, msg.as_string()) smtp.quit() return True except Exception",
"= account def send(self, content): if self.account.get(\"notice\") == \"\" or self.account.get(\"notice\") == \"local\":",
"return Notice.saveLocal(content) elif self.account.get(\"notice\") == \"mail\": if self.admin[0][\"mail\"][\"sendMail\"] == \"\" and self.admin[0][\"mail\"][\"authCode\"] ==",
"account: dict): self.admin = admin, self.account = account def send(self, content): if self.account.get(\"notice\")",
"收件人邮箱 receiver = self.account.get(\"mail\") # 邮件标题 mail_title = \"易班 \" + time.strftime(\"%Y-%m-%d\", time.localtime(int(time.time())))",
"MIMEText(message, \"html\", 'utf-8') msg[\"Subject\"] = Header(mail_title, 'utf-8') msg[\"From\"] = sendMail msg[\"To\"] = receiver",
"json import time from email.mime.text import MIMEText from email.header import Header from smtplib",
"\"txt\" }) response = requests.post(url=url, data=data, headers=headers).json() if response['code'] == 200: return Notice.log(f\"{self.account.get('mobile')}\\tPush",
"response['code'] == 200: return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送成功!\\n\") else: print(\"发送失败,转为本地记录\") Notice.saveLocal(content) return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送失败!原因: {response['msg']}\\n\")",
"== 200: return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送成功!\\n\") else: print(\"发送失败,转为本地记录\") Notice.saveLocal(content) return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送失败!原因: {response['msg']}\\n\") @staticmethod",
"as f: f.write(message) print(message) @staticmethod def saveLocal(message): with open(\"data/result.log\", mode=\"a+\", encoding=\"utf-8\") as w:",
"apecode @File :notice.py @IDE :PyCharm @Blog:https://liiuyangxiong.cn \"\"\" import json import time from email.mime.text",
"print(\"发送失败,转为本地记录\") Notice.saveLocal(content) return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送失败!原因: {response['msg']}\\n\") @staticmethod def log(message: str): with open(file=\"data/logs.log\", mode=\"a+\",",
"msg[\"To\"] = receiver smtp.sendmail(sendMail, receiver, msg.as_string()) smtp.quit() return True except Exception as e:",
"= receiver smtp.sendmail(sendMail, receiver, msg.as_string()) smtp.quit() return True except Exception as e: print(e)",
"ssl登录 smtp = SMTP_SSL(host_server) smtp.ehlo(host_server) smtp.login(sendMail, authCode) msg = MIMEText(message, \"html\", 'utf-8') msg[\"Subject\"]",
"host_server = self.admin[0][\"mail\"][\"smtpServer\"] # 发件人的邮箱 sendMail = self.admin[0][\"mail\"][\"sendMail\"] # 邮箱的授权码 authCode = self.admin[0][\"mail\"][\"authCode\"]",
":PyCharm @Blog:https://liiuyangxiong.cn \"\"\" import json import time from email.mime.text import MIMEText from email.header",
"# ssl登录 smtp = SMTP_SSL(host_server) smtp.ehlo(host_server) smtp.login(sendMail, authCode) msg = MIMEText(message, \"html\", 'utf-8')",
"\" 签到情况\" # ssl登录 smtp = SMTP_SSL(host_server) smtp.ehlo(host_server) smtp.login(sendMail, authCode) msg = MIMEText(message,",
"{\"Content-Type\": \"application/json\"} data = json.dumps({ \"token\": self.account.get(\"pushToken\"), \"title\": \"易班签到通知\", \"content\": content, \"template\": \"txt\"",
"requests.post(url=url, data=data, headers=headers).json() if response['code'] == 200: return Notice.log(f\"{self.account.get('mobile')}\\tPush Plus发送成功!\\n\") else: print(\"发送失败,转为本地记录\") Notice.saveLocal(content)",
"msg[\"Subject\"] = Header(mail_title, 'utf-8') msg[\"From\"] = sendMail msg[\"To\"] = receiver smtp.sendmail(sendMail, receiver, msg.as_string())",
"receiver smtp.sendmail(sendMail, receiver, msg.as_string()) smtp.quit() return True except Exception as e: print(e) return",
"config class Notice: def __init__(self, admin: dict, account: dict): self.admin = admin, self.account",
"msg.as_string()) smtp.quit() return True except Exception as e: print(e) return False # 发送pushPlus",
"-*- coding: utf-8 -*- \"\"\" @Time : 2021/8/24 13:00 @Auth : apecode @File",
"content, \"template\": \"txt\" }) response = requests.post(url=url, data=data, headers=headers).json() if response['code'] == 200:"
] |
[
"def decrypt_data(self, encrypted_token: str) -> bytes: jwe_token = jwe.JWE(algs=[\"dir\", \"A256GCM\"]) jwe_token.deserialize(encrypted_token, self.key) payload:",
"jwe.JWE( plaintext=data, protected=protected_header, recipient=self.key ) serialized_token: str = jwe_token.serialize(compact=True) return serialized_token def decrypt_data(self,",
"hashlib from typing import Optional, Union from jwcrypto import jwe, jwk from jwcrypto.common",
"@staticmethod def _generate_key(user_id: str, user_ik: str, pepper: str) -> jwk.JWK: sha256 = hashlib.sha256()",
"not provided\") if not user_ik: raise ValueError(\"user_ik not provided\") if not pepper: raise",
"jwk.JWK(**password) def encrypt_data(self, data: Union[str, dict]) -> str: if isinstance(data, dict): data =",
"provided\") if not user_ik: raise ValueError(\"user_ik not provided\") if not pepper: raise ValueError(\"pepper",
") serialized_token: str = jwe_token.serialize(compact=True) return serialized_token def decrypt_data(self, encrypted_token: str) -> bytes:",
"Optional[str] ) -> None: if not user_id: raise ValueError(\"user_id not provided\") if not",
"typing import Optional, Union from jwcrypto import jwe, jwk from jwcrypto.common import base64url_encode",
"we only need the first 32 characters for the CEK cek = to_bytes(sha256.hexdigest()[:32])",
"jwk.JWK: sha256 = hashlib.sha256() sha256.update(to_str(user_id).encode(\"utf-8\")) sha256.update(to_str(user_ik).encode(\"utf-8\")) sha256.update(to_str(pepper).encode(\"utf-8\")) # we only need the first",
"data = json_dumps(data) protected_header = {\"alg\": \"dir\", \"enc\": \"A256GCM\", \"kid\": \"1,1\"} jwe_token =",
"import hashlib from typing import Optional, Union from jwcrypto import jwe, jwk from",
"self.key = self._generate_key(user_id, user_ik, pepper) @staticmethod def _generate_key(user_id: str, user_ik: str, pepper: str)",
"sha256.update(to_str(user_id).encode(\"utf-8\")) sha256.update(to_str(user_ik).encode(\"utf-8\")) sha256.update(to_str(pepper).encode(\"utf-8\")) # we only need the first 32 characters for the",
"str = jwe_token.serialize(compact=True) return serialized_token def decrypt_data(self, encrypted_token: str) -> bytes: jwe_token =",
"None: if not user_id: raise ValueError(\"user_id not provided\") if not user_ik: raise ValueError(\"user_ik",
"jwe, jwk from jwcrypto.common import base64url_encode from structlog import get_logger from app.utilities.json import",
"self, user_id: Optional[str], user_ik: Optional[str], pepper: Optional[str] ) -> None: if not user_id:",
"sha256 = hashlib.sha256() sha256.update(to_str(user_id).encode(\"utf-8\")) sha256.update(to_str(user_ik).encode(\"utf-8\")) sha256.update(to_str(pepper).encode(\"utf-8\")) # we only need the first 32",
"get_logger() class StorageEncryption: def __init__( self, user_id: Optional[str], user_ik: Optional[str], pepper: Optional[str] )",
"= jwe_token.serialize(compact=True) return serialized_token def decrypt_data(self, encrypted_token: str) -> bytes: jwe_token = jwe.JWE(algs=[\"dir\",",
"sha256.update(to_str(pepper).encode(\"utf-8\")) # we only need the first 32 characters for the CEK cek",
"ValueError(\"user_ik not provided\") if not pepper: raise ValueError(\"pepper not provided\") self.key = self._generate_key(user_id,",
"structlog import get_logger from app.utilities.json import json_dumps from app.utilities.strings import to_bytes, to_str logger",
"password = {\"<PASSWORD>\": \"<PASSWORD>\", \"k\": base64url_encode(cek)} return jwk.JWK(**password) def encrypt_data(self, data: Union[str, dict])",
"str, user_ik: str, pepper: str) -> jwk.JWK: sha256 = hashlib.sha256() sha256.update(to_str(user_id).encode(\"utf-8\")) sha256.update(to_str(user_ik).encode(\"utf-8\")) sha256.update(to_str(pepper).encode(\"utf-8\"))",
"logger = get_logger() class StorageEncryption: def __init__( self, user_id: Optional[str], user_ik: Optional[str], pepper:",
"dict]) -> str: if isinstance(data, dict): data = json_dumps(data) protected_header = {\"alg\": \"dir\",",
"pepper: Optional[str] ) -> None: if not user_id: raise ValueError(\"user_id not provided\") if",
"to_bytes(sha256.hexdigest()[:32]) password = {\"<PASSWORD>\": \"<PASSWORD>\", \"k\": base64url_encode(cek)} return jwk.JWK(**password) def encrypt_data(self, data: Union[str,",
"first 32 characters for the CEK cek = to_bytes(sha256.hexdigest()[:32]) password = {\"<PASSWORD>\": \"<PASSWORD>\",",
") -> None: if not user_id: raise ValueError(\"user_id not provided\") if not user_ik:",
"self._generate_key(user_id, user_ik, pepper) @staticmethod def _generate_key(user_id: str, user_ik: str, pepper: str) -> jwk.JWK:",
"import json_dumps from app.utilities.strings import to_bytes, to_str logger = get_logger() class StorageEncryption: def",
"from structlog import get_logger from app.utilities.json import json_dumps from app.utilities.strings import to_bytes, to_str",
"32 characters for the CEK cek = to_bytes(sha256.hexdigest()[:32]) password = {\"<PASSWORD>\": \"<PASSWORD>\", \"k\":",
"characters for the CEK cek = to_bytes(sha256.hexdigest()[:32]) password = {\"<PASSWORD>\": \"<PASSWORD>\", \"k\": base64url_encode(cek)}",
"serialized_token def decrypt_data(self, encrypted_token: str) -> bytes: jwe_token = jwe.JWE(algs=[\"dir\", \"A256GCM\"]) jwe_token.deserialize(encrypted_token, self.key)",
"from jwcrypto import jwe, jwk from jwcrypto.common import base64url_encode from structlog import get_logger",
"jwcrypto import jwe, jwk from jwcrypto.common import base64url_encode from structlog import get_logger from",
"ValueError(\"user_id not provided\") if not user_ik: raise ValueError(\"user_ik not provided\") if not pepper:",
"if not user_id: raise ValueError(\"user_id not provided\") if not user_ik: raise ValueError(\"user_ik not",
"\"A256GCM\", \"kid\": \"1,1\"} jwe_token = jwe.JWE( plaintext=data, protected=protected_header, recipient=self.key ) serialized_token: str =",
"= self._generate_key(user_id, user_ik, pepper) @staticmethod def _generate_key(user_id: str, user_ik: str, pepper: str) ->",
"hashlib.sha256() sha256.update(to_str(user_id).encode(\"utf-8\")) sha256.update(to_str(user_ik).encode(\"utf-8\")) sha256.update(to_str(pepper).encode(\"utf-8\")) # we only need the first 32 characters for",
"str) -> jwk.JWK: sha256 = hashlib.sha256() sha256.update(to_str(user_id).encode(\"utf-8\")) sha256.update(to_str(user_ik).encode(\"utf-8\")) sha256.update(to_str(pepper).encode(\"utf-8\")) # we only need",
"-> None: if not user_id: raise ValueError(\"user_id not provided\") if not user_ik: raise",
"= to_bytes(sha256.hexdigest()[:32]) password = {\"<PASSWORD>\": \"<PASSWORD>\", \"k\": base64url_encode(cek)} return jwk.JWK(**password) def encrypt_data(self, data:",
"the CEK cek = to_bytes(sha256.hexdigest()[:32]) password = {\"<PASSWORD>\": \"<PASSWORD>\", \"k\": base64url_encode(cek)} return jwk.JWK(**password)",
"recipient=self.key ) serialized_token: str = jwe_token.serialize(compact=True) return serialized_token def decrypt_data(self, encrypted_token: str) ->",
"{\"<PASSWORD>\": \"<PASSWORD>\", \"k\": base64url_encode(cek)} return jwk.JWK(**password) def encrypt_data(self, data: Union[str, dict]) -> str:",
"user_id: raise ValueError(\"user_id not provided\") if not user_ik: raise ValueError(\"user_ik not provided\") if",
"\"dir\", \"enc\": \"A256GCM\", \"kid\": \"1,1\"} jwe_token = jwe.JWE( plaintext=data, protected=protected_header, recipient=self.key ) serialized_token:",
"pepper) @staticmethod def _generate_key(user_id: str, user_ik: str, pepper: str) -> jwk.JWK: sha256 =",
"Optional, Union from jwcrypto import jwe, jwk from jwcrypto.common import base64url_encode from structlog",
"class StorageEncryption: def __init__( self, user_id: Optional[str], user_ik: Optional[str], pepper: Optional[str] ) ->",
"str) -> bytes: jwe_token = jwe.JWE(algs=[\"dir\", \"A256GCM\"]) jwe_token.deserialize(encrypted_token, self.key) payload: bytes = jwe_token.payload",
"import base64url_encode from structlog import get_logger from app.utilities.json import json_dumps from app.utilities.strings import",
"for the CEK cek = to_bytes(sha256.hexdigest()[:32]) password = {\"<PASSWORD>\": \"<PASSWORD>\", \"k\": base64url_encode(cek)} return",
"encrypt_data(self, data: Union[str, dict]) -> str: if isinstance(data, dict): data = json_dumps(data) protected_header",
"user_ik: Optional[str], pepper: Optional[str] ) -> None: if not user_id: raise ValueError(\"user_id not",
"plaintext=data, protected=protected_header, recipient=self.key ) serialized_token: str = jwe_token.serialize(compact=True) return serialized_token def decrypt_data(self, encrypted_token:",
"def _generate_key(user_id: str, user_ik: str, pepper: str) -> jwk.JWK: sha256 = hashlib.sha256() sha256.update(to_str(user_id).encode(\"utf-8\"))",
"json_dumps(data) protected_header = {\"alg\": \"dir\", \"enc\": \"A256GCM\", \"kid\": \"1,1\"} jwe_token = jwe.JWE( plaintext=data,",
"= {\"alg\": \"dir\", \"enc\": \"A256GCM\", \"kid\": \"1,1\"} jwe_token = jwe.JWE( plaintext=data, protected=protected_header, recipient=self.key",
"def encrypt_data(self, data: Union[str, dict]) -> str: if isinstance(data, dict): data = json_dumps(data)",
"bytes: jwe_token = jwe.JWE(algs=[\"dir\", \"A256GCM\"]) jwe_token.deserialize(encrypted_token, self.key) payload: bytes = jwe_token.payload return payload",
"pepper: raise ValueError(\"pepper not provided\") self.key = self._generate_key(user_id, user_ik, pepper) @staticmethod def _generate_key(user_id:",
"raise ValueError(\"user_id not provided\") if not user_ik: raise ValueError(\"user_ik not provided\") if not",
"raise ValueError(\"user_ik not provided\") if not pepper: raise ValueError(\"pepper not provided\") self.key =",
"= {\"<PASSWORD>\": \"<PASSWORD>\", \"k\": base64url_encode(cek)} return jwk.JWK(**password) def encrypt_data(self, data: Union[str, dict]) ->",
"not provided\") self.key = self._generate_key(user_id, user_ik, pepper) @staticmethod def _generate_key(user_id: str, user_ik: str,",
"data: Union[str, dict]) -> str: if isinstance(data, dict): data = json_dumps(data) protected_header =",
"return serialized_token def decrypt_data(self, encrypted_token: str) -> bytes: jwe_token = jwe.JWE(algs=[\"dir\", \"A256GCM\"]) jwe_token.deserialize(encrypted_token,",
"if isinstance(data, dict): data = json_dumps(data) protected_header = {\"alg\": \"dir\", \"enc\": \"A256GCM\", \"kid\":",
"protected=protected_header, recipient=self.key ) serialized_token: str = jwe_token.serialize(compact=True) return serialized_token def decrypt_data(self, encrypted_token: str)",
"app.utilities.json import json_dumps from app.utilities.strings import to_bytes, to_str logger = get_logger() class StorageEncryption:",
"app.utilities.strings import to_bytes, to_str logger = get_logger() class StorageEncryption: def __init__( self, user_id:",
"-> jwk.JWK: sha256 = hashlib.sha256() sha256.update(to_str(user_id).encode(\"utf-8\")) sha256.update(to_str(user_ik).encode(\"utf-8\")) sha256.update(to_str(pepper).encode(\"utf-8\")) # we only need the",
"_generate_key(user_id: str, user_ik: str, pepper: str) -> jwk.JWK: sha256 = hashlib.sha256() sha256.update(to_str(user_id).encode(\"utf-8\")) sha256.update(to_str(user_ik).encode(\"utf-8\"))",
"user_ik: raise ValueError(\"user_ik not provided\") if not pepper: raise ValueError(\"pepper not provided\") self.key",
"from app.utilities.json import json_dumps from app.utilities.strings import to_bytes, to_str logger = get_logger() class",
"base64url_encode from structlog import get_logger from app.utilities.json import json_dumps from app.utilities.strings import to_bytes,",
"pepper: str) -> jwk.JWK: sha256 = hashlib.sha256() sha256.update(to_str(user_id).encode(\"utf-8\")) sha256.update(to_str(user_ik).encode(\"utf-8\")) sha256.update(to_str(pepper).encode(\"utf-8\")) # we only",
"\"k\": base64url_encode(cek)} return jwk.JWK(**password) def encrypt_data(self, data: Union[str, dict]) -> str: if isinstance(data,",
"\"kid\": \"1,1\"} jwe_token = jwe.JWE( plaintext=data, protected=protected_header, recipient=self.key ) serialized_token: str = jwe_token.serialize(compact=True)",
"raise ValueError(\"pepper not provided\") self.key = self._generate_key(user_id, user_ik, pepper) @staticmethod def _generate_key(user_id: str,",
"get_logger from app.utilities.json import json_dumps from app.utilities.strings import to_bytes, to_str logger = get_logger()",
"the first 32 characters for the CEK cek = to_bytes(sha256.hexdigest()[:32]) password = {\"<PASSWORD>\":",
"not pepper: raise ValueError(\"pepper not provided\") self.key = self._generate_key(user_id, user_ik, pepper) @staticmethod def",
"{\"alg\": \"dir\", \"enc\": \"A256GCM\", \"kid\": \"1,1\"} jwe_token = jwe.JWE( plaintext=data, protected=protected_header, recipient=self.key )",
"user_id: Optional[str], user_ik: Optional[str], pepper: Optional[str] ) -> None: if not user_id: raise",
"not provided\") if not pepper: raise ValueError(\"pepper not provided\") self.key = self._generate_key(user_id, user_ik,",
"= hashlib.sha256() sha256.update(to_str(user_id).encode(\"utf-8\")) sha256.update(to_str(user_ik).encode(\"utf-8\")) sha256.update(to_str(pepper).encode(\"utf-8\")) # we only need the first 32 characters",
"need the first 32 characters for the CEK cek = to_bytes(sha256.hexdigest()[:32]) password =",
"to_bytes, to_str logger = get_logger() class StorageEncryption: def __init__( self, user_id: Optional[str], user_ik:",
"= get_logger() class StorageEncryption: def __init__( self, user_id: Optional[str], user_ik: Optional[str], pepper: Optional[str]",
"if not user_ik: raise ValueError(\"user_ik not provided\") if not pepper: raise ValueError(\"pepper not",
"Union[str, dict]) -> str: if isinstance(data, dict): data = json_dumps(data) protected_header = {\"alg\":",
"= jwe.JWE( plaintext=data, protected=protected_header, recipient=self.key ) serialized_token: str = jwe_token.serialize(compact=True) return serialized_token def",
"\"1,1\"} jwe_token = jwe.JWE( plaintext=data, protected=protected_header, recipient=self.key ) serialized_token: str = jwe_token.serialize(compact=True) return",
"\"enc\": \"A256GCM\", \"kid\": \"1,1\"} jwe_token = jwe.JWE( plaintext=data, protected=protected_header, recipient=self.key ) serialized_token: str",
"-> bytes: jwe_token = jwe.JWE(algs=[\"dir\", \"A256GCM\"]) jwe_token.deserialize(encrypted_token, self.key) payload: bytes = jwe_token.payload return",
"if not pepper: raise ValueError(\"pepper not provided\") self.key = self._generate_key(user_id, user_ik, pepper) @staticmethod",
"isinstance(data, dict): data = json_dumps(data) protected_header = {\"alg\": \"dir\", \"enc\": \"A256GCM\", \"kid\": \"1,1\"}",
"encrypted_token: str) -> bytes: jwe_token = jwe.JWE(algs=[\"dir\", \"A256GCM\"]) jwe_token.deserialize(encrypted_token, self.key) payload: bytes =",
"import get_logger from app.utilities.json import json_dumps from app.utilities.strings import to_bytes, to_str logger =",
"Union from jwcrypto import jwe, jwk from jwcrypto.common import base64url_encode from structlog import",
"import to_bytes, to_str logger = get_logger() class StorageEncryption: def __init__( self, user_id: Optional[str],",
"ValueError(\"pepper not provided\") self.key = self._generate_key(user_id, user_ik, pepper) @staticmethod def _generate_key(user_id: str, user_ik:",
"Optional[str], user_ik: Optional[str], pepper: Optional[str] ) -> None: if not user_id: raise ValueError(\"user_id",
"StorageEncryption: def __init__( self, user_id: Optional[str], user_ik: Optional[str], pepper: Optional[str] ) -> None:",
"not user_id: raise ValueError(\"user_id not provided\") if not user_ik: raise ValueError(\"user_ik not provided\")",
"user_ik, pepper) @staticmethod def _generate_key(user_id: str, user_ik: str, pepper: str) -> jwk.JWK: sha256",
"jwe_token.serialize(compact=True) return serialized_token def decrypt_data(self, encrypted_token: str) -> bytes: jwe_token = jwe.JWE(algs=[\"dir\", \"A256GCM\"])",
"sha256.update(to_str(user_ik).encode(\"utf-8\")) sha256.update(to_str(pepper).encode(\"utf-8\")) # we only need the first 32 characters for the CEK",
"serialized_token: str = jwe_token.serialize(compact=True) return serialized_token def decrypt_data(self, encrypted_token: str) -> bytes: jwe_token",
"from app.utilities.strings import to_bytes, to_str logger = get_logger() class StorageEncryption: def __init__( self,",
"str, pepper: str) -> jwk.JWK: sha256 = hashlib.sha256() sha256.update(to_str(user_id).encode(\"utf-8\")) sha256.update(to_str(user_ik).encode(\"utf-8\")) sha256.update(to_str(pepper).encode(\"utf-8\")) # we",
"not user_ik: raise ValueError(\"user_ik not provided\") if not pepper: raise ValueError(\"pepper not provided\")",
"CEK cek = to_bytes(sha256.hexdigest()[:32]) password = {\"<PASSWORD>\": \"<PASSWORD>\", \"k\": base64url_encode(cek)} return jwk.JWK(**password) def",
"dict): data = json_dumps(data) protected_header = {\"alg\": \"dir\", \"enc\": \"A256GCM\", \"kid\": \"1,1\"} jwe_token",
"provided\") if not pepper: raise ValueError(\"pepper not provided\") self.key = self._generate_key(user_id, user_ik, pepper)",
"import jwe, jwk from jwcrypto.common import base64url_encode from structlog import get_logger from app.utilities.json",
"def __init__( self, user_id: Optional[str], user_ik: Optional[str], pepper: Optional[str] ) -> None: if",
"cek = to_bytes(sha256.hexdigest()[:32]) password = {\"<PASSWORD>\": \"<PASSWORD>\", \"k\": base64url_encode(cek)} return jwk.JWK(**password) def encrypt_data(self,",
"-> str: if isinstance(data, dict): data = json_dumps(data) protected_header = {\"alg\": \"dir\", \"enc\":",
"__init__( self, user_id: Optional[str], user_ik: Optional[str], pepper: Optional[str] ) -> None: if not",
"from typing import Optional, Union from jwcrypto import jwe, jwk from jwcrypto.common import",
"# we only need the first 32 characters for the CEK cek =",
"Optional[str], pepper: Optional[str] ) -> None: if not user_id: raise ValueError(\"user_id not provided\")",
"only need the first 32 characters for the CEK cek = to_bytes(sha256.hexdigest()[:32]) password",
"jwcrypto.common import base64url_encode from structlog import get_logger from app.utilities.json import json_dumps from app.utilities.strings",
"provided\") self.key = self._generate_key(user_id, user_ik, pepper) @staticmethod def _generate_key(user_id: str, user_ik: str, pepper:",
"protected_header = {\"alg\": \"dir\", \"enc\": \"A256GCM\", \"kid\": \"1,1\"} jwe_token = jwe.JWE( plaintext=data, protected=protected_header,",
"base64url_encode(cek)} return jwk.JWK(**password) def encrypt_data(self, data: Union[str, dict]) -> str: if isinstance(data, dict):",
"return jwk.JWK(**password) def encrypt_data(self, data: Union[str, dict]) -> str: if isinstance(data, dict): data",
"= json_dumps(data) protected_header = {\"alg\": \"dir\", \"enc\": \"A256GCM\", \"kid\": \"1,1\"} jwe_token = jwe.JWE(",
"decrypt_data(self, encrypted_token: str) -> bytes: jwe_token = jwe.JWE(algs=[\"dir\", \"A256GCM\"]) jwe_token.deserialize(encrypted_token, self.key) payload: bytes",
"json_dumps from app.utilities.strings import to_bytes, to_str logger = get_logger() class StorageEncryption: def __init__(",
"from jwcrypto.common import base64url_encode from structlog import get_logger from app.utilities.json import json_dumps from",
"\"<PASSWORD>\", \"k\": base64url_encode(cek)} return jwk.JWK(**password) def encrypt_data(self, data: Union[str, dict]) -> str: if",
"jwe_token = jwe.JWE( plaintext=data, protected=protected_header, recipient=self.key ) serialized_token: str = jwe_token.serialize(compact=True) return serialized_token",
"to_str logger = get_logger() class StorageEncryption: def __init__( self, user_id: Optional[str], user_ik: Optional[str],",
"user_ik: str, pepper: str) -> jwk.JWK: sha256 = hashlib.sha256() sha256.update(to_str(user_id).encode(\"utf-8\")) sha256.update(to_str(user_ik).encode(\"utf-8\")) sha256.update(to_str(pepper).encode(\"utf-8\")) #",
"str: if isinstance(data, dict): data = json_dumps(data) protected_header = {\"alg\": \"dir\", \"enc\": \"A256GCM\",",
"jwk from jwcrypto.common import base64url_encode from structlog import get_logger from app.utilities.json import json_dumps",
"import Optional, Union from jwcrypto import jwe, jwk from jwcrypto.common import base64url_encode from",
"<gh_stars>0 import hashlib from typing import Optional, Union from jwcrypto import jwe, jwk"
] |
[
"from models.league import League from models.standings import Standings league = League() league.reload() standings",
"json from models.league import League from models.standings import Standings league = League() league.reload()",
"<gh_stars>1-10 import json from models.league import League from models.standings import Standings league =",
"import json from models.league import League from models.standings import Standings league = League()",
"import League from models.standings import Standings league = League() league.reload() standings = Standings()",
"models.league import League from models.standings import Standings league = League() league.reload() standings =",
"League from models.standings import Standings league = League() league.reload() standings = Standings() standings.reload()"
] |
[
"from ...wallet.util import b58_to_bytes from ..did_key import DIDKey, DID_KEY_RESOLVERS from .test_dids import (",
"did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_fingerprint(self): did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT) assert did_key.did == TEST_BLS12381G1_DID assert",
"== TEST_BLS12381G1_DID def test_bls12381g1_from_public_key_b58(self): did_key = DIDKey.from_public_key_b58( TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1 ) assert did_key.did ==",
"assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY assert did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) assert",
"test_bls12381g1_from_fingerprint(self): did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT) assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def",
"did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES def test_bls12381g1_diddoc(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] assert resolver(did_key)",
"= DID_KEY_RESOLVERS[KeyType.BLS12381G1] assert resolver(did_key) == did_key.did_doc def test_bls12381g1_resolver(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver =",
"assert did_key.key_type == KeyType.BLS12381G1 assert did_key.key_id == TEST_BLS12381G1_KEY_ID assert did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES def",
"== TEST_BLS12381G1_BASE58_KEY assert did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) assert did_key.key_type == KeyType.BLS12381G1 assert did_key.key_id ==",
"did_key = DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_public_key_b58(self): did_key = DIDKey.from_public_key_b58(",
"from ..did_key import DIDKey, DID_KEY_RESOLVERS from .test_dids import ( DID_B<KEY>, ) TEST_BLS12381G1_BASE58_KEY =",
"DIDKey, DID_KEY_RESOLVERS from .test_dids import ( DID_B<KEY>, ) TEST_BLS12381G1_BASE58_KEY = ( \"<KEY>\" )",
"unittest import TestCase from ...wallet.key_type import KeyType from ...wallet.util import b58_to_bytes from ..did_key",
"= DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_properties(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.fingerprint",
"TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_properties(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT assert did_key.did ==",
"did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY assert did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) assert did_key.key_type == KeyType.BLS12381G1 assert did_key.key_id",
"DIDKey.from_public_key_b58( TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1 ) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_fingerprint(self): did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT)",
"= DIDKey.from_public_key_b58( TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1 ) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_fingerprint(self): did_key =",
"= DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 ==",
"assert did_key.key_id == TEST_BLS12381G1_KEY_ID assert did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES def test_bls12381g1_diddoc(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID)",
"= ( \"<KEY>\" ) TEST_BLS12381G1_DID = f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_KEY_ID = f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_PREFIX_BYTES = b\"\".join(",
"== did_key.did_doc def test_bls12381g1_resolver(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] did_doc = resolver(did_key)",
"TEST_BLS12381G1_FINGERPRINT = ( \"<KEY>\" ) TEST_BLS12381G1_DID = f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_KEY_ID = f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_PREFIX_BYTES =",
"DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] did_doc = resolver(did_key) assert ( did_doc == DID_BLS12381G1_z3tEFALUKUzzCAvytMHX8X4SnsNsq6T5tC5Zb18oQEt1FqNcJXqJ3AA9umgzA9yoqPBeWA )",
"import KeyType from ...wallet.util import b58_to_bytes from ..did_key import DIDKey, DID_KEY_RESOLVERS from .test_dids",
"TEST_BLS12381G1_DID def test_bls12381g1_from_public_key_b58(self): did_key = DIDKey.from_public_key_b58( TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1 ) assert did_key.did == TEST_BLS12381G1_DID",
"did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] assert resolver(did_key) == did_key.did_doc def test_bls12381g1_resolver(self): did_key",
"TestCase from ...wallet.key_type import KeyType from ...wallet.util import b58_to_bytes from ..did_key import DIDKey,",
"b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) assert did_key.key_type == KeyType.BLS12381G1 assert did_key.key_id == TEST_BLS12381G1_KEY_ID assert did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES",
"assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_fingerprint(self): did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT) assert did_key.did == TEST_BLS12381G1_DID",
"import DIDKey, DID_KEY_RESOLVERS from .test_dids import ( DID_B<KEY>, ) TEST_BLS12381G1_BASE58_KEY = ( \"<KEY>\"",
"== TEST_BLS12381G1_FINGERPRINT assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY assert did_key.public_key ==",
".test_dids import ( DID_B<KEY>, ) TEST_BLS12381G1_BASE58_KEY = ( \"<KEY>\" ) TEST_BLS12381G1_FINGERPRINT = (",
"did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] did_doc = resolver(did_key) assert ( did_doc ==",
"key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) did_key = DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_public_key_b58(self):",
"= b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) did_key = DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_public_key_b58(self): did_key",
"KeyType.BLS12381G1) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_public_key_b58(self): did_key = DIDKey.from_public_key_b58( TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1 )",
"f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_PREFIX_BYTES = b\"\".join( [b\"\\xea\\x01\", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)] ) class TestDIDKey(TestCase): def test_bls12381g1_from_public_key(self): key_bytes =",
"= f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_PREFIX_BYTES = b\"\".join( [b\"\\xea\\x01\", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)] ) class TestDIDKey(TestCase): def test_bls12381g1_from_public_key(self): key_bytes",
"TEST_BLS12381G1_PREFIX_BYTES = b\"\".join( [b\"\\xea\\x01\", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)] ) class TestDIDKey(TestCase): def test_bls12381g1_from_public_key(self): key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)",
"DID_B<KEY>, ) TEST_BLS12381G1_BASE58_KEY = ( \"<KEY>\" ) TEST_BLS12381G1_FINGERPRINT = ( \"<KEY>\" ) TEST_BLS12381G1_DID",
"== TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY assert did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) assert did_key.key_type ==",
"did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY assert did_key.public_key",
"\"<KEY>\" ) TEST_BLS12381G1_DID = f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_KEY_ID = f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_PREFIX_BYTES = b\"\".join( [b\"\\xea\\x01\", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)]",
"DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT) assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_from_did(self): did_key =",
"did_key.key_id == TEST_BLS12381G1_KEY_ID assert did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES def test_bls12381g1_diddoc(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver",
") TEST_BLS12381G1_DID = f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_KEY_ID = f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_PREFIX_BYTES = b\"\".join( [b\"\\xea\\x01\", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)] )",
"assert did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) assert did_key.key_type == KeyType.BLS12381G1 assert did_key.key_id == TEST_BLS12381G1_KEY_ID assert",
"did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY assert did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) assert did_key.key_type",
"TestDIDKey(TestCase): def test_bls12381g1_from_public_key(self): key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) did_key = DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1) assert did_key.did ==",
"( \"<KEY>\" ) TEST_BLS12381G1_FINGERPRINT = ( \"<KEY>\" ) TEST_BLS12381G1_DID = f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_KEY_ID =",
"did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT) assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_from_did(self):",
"TEST_BLS12381G1_DID = f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_KEY_ID = f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_PREFIX_BYTES = b\"\".join( [b\"\\xea\\x01\", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)] ) class",
"DID_KEY_RESOLVERS[KeyType.BLS12381G1] assert resolver(did_key) == did_key.did_doc def test_bls12381g1_resolver(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1]",
"TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_from_did(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_properties(self): did_key",
"from .test_dids import ( DID_B<KEY>, ) TEST_BLS12381G1_BASE58_KEY = ( \"<KEY>\" ) TEST_BLS12381G1_FINGERPRINT =",
"did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_properties(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert",
"assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_properties(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT",
"TEST_BLS12381G1_FINGERPRINT assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY assert did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)",
"...wallet.util import b58_to_bytes from ..did_key import DIDKey, DID_KEY_RESOLVERS from .test_dids import ( DID_B<KEY>,",
"did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_public_key_b58(self): did_key = DIDKey.from_public_key_b58( TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1 ) assert did_key.did",
"def test_bls12381g1_resolver(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] did_doc = resolver(did_key) assert (",
"TEST_BLS12381G1_PREFIX_BYTES def test_bls12381g1_diddoc(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] assert resolver(did_key) == did_key.did_doc",
"TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY assert did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) assert did_key.key_type == KeyType.BLS12381G1",
"= DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_public_key_b58(self): did_key = DIDKey.from_public_key_b58( TEST_BLS12381G1_BASE58_KEY,",
"DID_KEY_RESOLVERS from .test_dids import ( DID_B<KEY>, ) TEST_BLS12381G1_BASE58_KEY = ( \"<KEY>\" ) TEST_BLS12381G1_FINGERPRINT",
"= DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] assert resolver(did_key) == did_key.did_doc def test_bls12381g1_resolver(self): did_key =",
"resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] assert resolver(did_key) == did_key.did_doc def test_bls12381g1_resolver(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver",
"b\"\".join( [b\"\\xea\\x01\", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)] ) class TestDIDKey(TestCase): def test_bls12381g1_from_public_key(self): key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) did_key =",
"import TestCase from ...wallet.key_type import KeyType from ...wallet.util import b58_to_bytes from ..did_key import",
"did_key.key_type == KeyType.BLS12381G1 assert did_key.key_id == TEST_BLS12381G1_KEY_ID assert did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES def test_bls12381g1_diddoc(self):",
"( DID_B<KEY>, ) TEST_BLS12381G1_BASE58_KEY = ( \"<KEY>\" ) TEST_BLS12381G1_FINGERPRINT = ( \"<KEY>\" )",
"= DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT) assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_from_did(self): did_key",
"test_bls12381g1_diddoc(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] assert resolver(did_key) == did_key.did_doc def test_bls12381g1_resolver(self):",
"did_key.did_doc def test_bls12381g1_resolver(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] did_doc = resolver(did_key) assert",
"KeyType.BLS12381G1 assert did_key.key_id == TEST_BLS12381G1_KEY_ID assert did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES def test_bls12381g1_diddoc(self): did_key =",
"def test_bls12381g1_from_public_key_b58(self): did_key = DIDKey.from_public_key_b58( TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1 ) assert did_key.did == TEST_BLS12381G1_DID def",
"TEST_BLS12381G1_BASE58_KEY = ( \"<KEY>\" ) TEST_BLS12381G1_FINGERPRINT = ( \"<KEY>\" ) TEST_BLS12381G1_DID = f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\"",
"assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_public_key_b58(self): did_key = DIDKey.from_public_key_b58( TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1 ) assert",
"def test_bls12381g1_diddoc(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] assert resolver(did_key) == did_key.did_doc def",
"did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_from_did(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert",
"class TestDIDKey(TestCase): def test_bls12381g1_from_public_key(self): key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) did_key = DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1) assert did_key.did",
"def test_bls12381g1_properties(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT assert did_key.did == TEST_BLS12381G1_DID",
"= DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] did_doc = resolver(did_key) assert ( did_doc == DID_BLS12381G1_z3tEFALUKUzzCAvytMHX8X4SnsNsq6T5tC5Zb18oQEt1FqNcJXqJ3AA9umgzA9yoqPBeWA",
"TEST_BLS12381G1_DID def test_bls12381g1_from_fingerprint(self): did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT) assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 ==",
"DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY",
"test_bls12381g1_from_public_key_b58(self): did_key = DIDKey.from_public_key_b58( TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1 ) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_fingerprint(self):",
"assert did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES def test_bls12381g1_diddoc(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] assert",
"KeyType.BLS12381G1 ) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_fingerprint(self): did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT) assert did_key.did",
"resolver(did_key) == did_key.did_doc def test_bls12381g1_resolver(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] did_doc =",
"b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) did_key = DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_public_key_b58(self): did_key =",
"from unittest import TestCase from ...wallet.key_type import KeyType from ...wallet.util import b58_to_bytes from",
"== TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_from_did(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_properties(self):",
"assert resolver(did_key) == did_key.did_doc def test_bls12381g1_resolver(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] did_doc",
"KeyType from ...wallet.util import b58_to_bytes from ..did_key import DIDKey, DID_KEY_RESOLVERS from .test_dids import",
"assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_from_did(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY",
") assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_fingerprint(self): did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT) assert did_key.did ==",
"TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1 ) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_fingerprint(self): did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT) assert",
"\"<KEY>\" ) TEST_BLS12381G1_FINGERPRINT = ( \"<KEY>\" ) TEST_BLS12381G1_DID = f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_KEY_ID = f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\"",
"def test_bls12381g1_from_fingerprint(self): did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT) assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY",
"assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_from_did(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID)",
"def test_bls12381g1_from_public_key(self): key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) did_key = DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1) assert did_key.did == TEST_BLS12381G1_DID",
"b58_to_bytes from ..did_key import DIDKey, DID_KEY_RESOLVERS from .test_dids import ( DID_B<KEY>, ) TEST_BLS12381G1_BASE58_KEY",
"did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_from_did(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def",
"import ( DID_B<KEY>, ) TEST_BLS12381G1_BASE58_KEY = ( \"<KEY>\" ) TEST_BLS12381G1_FINGERPRINT = ( \"<KEY>\"",
"did_key = DIDKey.from_public_key_b58( TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1 ) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_fingerprint(self): did_key",
"def test_bls12381g1_from_did(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_properties(self): did_key =",
"test_bls12381g1_resolver(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] did_doc = resolver(did_key) assert ( did_doc",
"TEST_BLS12381G1_KEY_ID assert did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES def test_bls12381g1_diddoc(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1]",
"TEST_BLS12381G1_BASE58_KEY assert did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) assert did_key.key_type == KeyType.BLS12381G1 assert did_key.key_id == TEST_BLS12381G1_KEY_ID",
"b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)] ) class TestDIDKey(TestCase): def test_bls12381g1_from_public_key(self): key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) did_key = DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1)",
"DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_properties(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.fingerprint ==",
") TEST_BLS12381G1_FINGERPRINT = ( \"<KEY>\" ) TEST_BLS12381G1_DID = f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_KEY_ID = f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_PREFIX_BYTES",
"from ...wallet.key_type import KeyType from ...wallet.util import b58_to_bytes from ..did_key import DIDKey, DID_KEY_RESOLVERS",
"== TEST_BLS12381G1_DID def test_bls12381g1_from_fingerprint(self): did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT) assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58",
"did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) assert did_key.key_type == KeyType.BLS12381G1 assert did_key.key_id == TEST_BLS12381G1_KEY_ID assert did_key.prefixed_public_key",
"= ( \"<KEY>\" ) TEST_BLS12381G1_FINGERPRINT = ( \"<KEY>\" ) TEST_BLS12381G1_DID = f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_KEY_ID",
"[b\"\\xea\\x01\", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)] ) class TestDIDKey(TestCase): def test_bls12381g1_from_public_key(self): key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) did_key = DIDKey.from_public_key(key_bytes,",
"did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58",
"test_bls12381g1_from_did(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_properties(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID)",
"assert did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT assert did_key.did == TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY assert",
"== KeyType.BLS12381G1 assert did_key.key_id == TEST_BLS12381G1_KEY_ID assert did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES def test_bls12381g1_diddoc(self): did_key",
"test_bls12381g1_properties(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT assert did_key.did == TEST_BLS12381G1_DID assert",
"( \"<KEY>\" ) TEST_BLS12381G1_DID = f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_KEY_ID = f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_PREFIX_BYTES = b\"\".join( [b\"\\xea\\x01\",",
"DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1) assert did_key.did == TEST_BLS12381G1_DID def test_bls12381g1_from_public_key_b58(self): did_key = DIDKey.from_public_key_b58( TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1",
"== b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) assert did_key.key_type == KeyType.BLS12381G1 assert did_key.key_id == TEST_BLS12381G1_KEY_ID assert did_key.prefixed_public_key ==",
"DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] assert resolver(did_key) == did_key.did_doc def test_bls12381g1_resolver(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID)",
"== TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_from_did(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.public_key_b58",
"== TEST_BLS12381G1_KEY_ID assert did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES def test_bls12381g1_diddoc(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver =",
"= f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_KEY_ID = f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_PREFIX_BYTES = b\"\".join( [b\"\\xea\\x01\", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)] ) class TestDIDKey(TestCase):",
"test_bls12381g1_from_public_key(self): key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) did_key = DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1) assert did_key.did == TEST_BLS12381G1_DID def",
"...wallet.key_type import KeyType from ...wallet.util import b58_to_bytes from ..did_key import DIDKey, DID_KEY_RESOLVERS from",
"TEST_BLS12381G1_DID assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_from_did(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.public_key_b58 ==",
"TEST_BLS12381G1_KEY_ID = f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_PREFIX_BYTES = b\"\".join( [b\"\\xea\\x01\", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)] ) class TestDIDKey(TestCase): def test_bls12381g1_from_public_key(self):",
"assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY assert did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) assert did_key.key_type == KeyType.BLS12381G1 assert",
"f\"did:key:{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_KEY_ID = f\"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}\" TEST_BLS12381G1_PREFIX_BYTES = b\"\".join( [b\"\\xea\\x01\", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)] ) class TestDIDKey(TestCase): def",
"= b\"\".join( [b\"\\xea\\x01\", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)] ) class TestDIDKey(TestCase): def test_bls12381g1_from_public_key(self): key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) did_key",
"== TEST_BLS12381G1_PREFIX_BYTES def test_bls12381g1_diddoc(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1] assert resolver(did_key) ==",
") TEST_BLS12381G1_BASE58_KEY = ( \"<KEY>\" ) TEST_BLS12381G1_FINGERPRINT = ( \"<KEY>\" ) TEST_BLS12381G1_DID =",
"..did_key import DIDKey, DID_KEY_RESOLVERS from .test_dids import ( DID_B<KEY>, ) TEST_BLS12381G1_BASE58_KEY = (",
") class TestDIDKey(TestCase): def test_bls12381g1_from_public_key(self): key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY) did_key = DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1) assert",
"did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_properties(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT assert",
"import b58_to_bytes from ..did_key import DIDKey, DID_KEY_RESOLVERS from .test_dids import ( DID_B<KEY>, )",
"== TEST_BLS12381G1_BASE58_KEY def test_bls12381g1_properties(self): did_key = DIDKey.from_did(TEST_BLS12381G1_DID) assert did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT assert did_key.did"
] |
[
"it should be called for the test to work. \"\"\" import subprocess if",
"__name__ == '__main__': process = subprocess.Popen( ['python', 'test_executable_caller.py','test_executable_callee.py'], shell = False, universal_newlines =",
"to work. \"\"\" import subprocess if __name__ == '__main__': process = subprocess.Popen( ['python',",
"work. \"\"\" import subprocess if __name__ == '__main__': process = subprocess.Popen( ['python', 'test_executable_caller.py','test_executable_callee.py'],",
"import subprocess if __name__ == '__main__': process = subprocess.Popen( ['python', 'test_executable_caller.py','test_executable_callee.py'], shell =",
"= subprocess.Popen( ['python', 'test_executable_caller.py','test_executable_callee.py'], shell = False, universal_newlines = True ) exit_status =",
"calls test_executable_caller as it should be called for the test to work. \"\"\"",
"called for the test to work. \"\"\" import subprocess if __name__ == '__main__':",
"subprocess.Popen( ['python', 'test_executable_caller.py','test_executable_callee.py'], shell = False, universal_newlines = True ) exit_status = process.wait()",
"test to work. \"\"\" import subprocess if __name__ == '__main__': process = subprocess.Popen(",
"#!/usr/bin/env python \"\"\" this calls test_executable_caller as it should be called for the",
"subprocess if __name__ == '__main__': process = subprocess.Popen( ['python', 'test_executable_caller.py','test_executable_callee.py'], shell = False,",
"be called for the test to work. \"\"\" import subprocess if __name__ ==",
"'__main__': process = subprocess.Popen( ['python', 'test_executable_caller.py','test_executable_callee.py'], shell = False, universal_newlines = True )",
"test_executable_caller as it should be called for the test to work. \"\"\" import",
"python \"\"\" this calls test_executable_caller as it should be called for the test",
"this calls test_executable_caller as it should be called for the test to work.",
"\"\"\" import subprocess if __name__ == '__main__': process = subprocess.Popen( ['python', 'test_executable_caller.py','test_executable_callee.py'], shell",
"\"\"\" this calls test_executable_caller as it should be called for the test to",
"for the test to work. \"\"\" import subprocess if __name__ == '__main__': process",
"if __name__ == '__main__': process = subprocess.Popen( ['python', 'test_executable_caller.py','test_executable_callee.py'], shell = False, universal_newlines",
"the test to work. \"\"\" import subprocess if __name__ == '__main__': process =",
"should be called for the test to work. \"\"\" import subprocess if __name__",
"process = subprocess.Popen( ['python', 'test_executable_caller.py','test_executable_callee.py'], shell = False, universal_newlines = True ) exit_status",
"as it should be called for the test to work. \"\"\" import subprocess",
"== '__main__': process = subprocess.Popen( ['python', 'test_executable_caller.py','test_executable_callee.py'], shell = False, universal_newlines = True"
] |
[
"assert isinstance(sub_category_n, SubCategory) assert str(sub_category_n) == \"n (m)\" def test_quiz(quiz_q, user_A, category_m, sub_category_n):",
"assert question_q.theme3 == \"t3\" assert str(question_q) == \"question\" def test_answerUser(answerUser, question_q, user_A): assert",
"statistics=stats_s, score=5 ) @pytest.fixture def themeScore_ts(db, stats_s, quiz_q): return ThemeScore.objects.create( theme=\"t1\", score=5, statistics=stats_s,",
"question_q): return QuestionScore.objects.create( question=question_q, statistics=stats_s, score=5 ) @pytest.fixture def themeScore_ts(db, stats_s, quiz_q): return",
"quiz_q): return ThemeScore.objects.create( theme=\"t1\", score=5, statistics=stats_s, quiz=quiz_q ) ### TESTS ### def test_category(category_m):",
"== 5 assert stats_s.medium == 5 assert stats_s.difficult == 5 def test_grade(grade_g, stats_s):",
"\"m\" assert str(category_m) == \"m\" def test_sub_category(category_m, sub_category_n): assert sub_category_n.sub_category == \"n\" assert",
"random_order=False, difficulty=1, ) @pytest.fixture def question_q(db, quiz_q): return Question.objects.create( quiz=quiz_q, difficulty=1, order=1, figure=None,",
"return Category.objects.create(category=\"m\") @pytest.fixture def sub_category_n(db, category_m): return SubCategory.objects.create(category=category_m, sub_category=\"n\") @pytest.fixture def user_A(db): return",
"1 assert str(quiz_q) == \"title\" def test_question(quiz_q, question_q): assert question_q.quiz == quiz_q assert",
"quiz=quiz_q, difficulty=1, order=1, figure=None, content=\"question\", explanation=None, theme1=\"t1\", theme2=\"t2\", theme3=\"t3\", ) @pytest.fixture def answerUser(db,",
"assert questionScore_qs.score == 5 def test_themeScore(themeScore_ts, stats_s, quiz_q): assert themeScore_ts.theme == \"t1\" assert",
"assert answerUser.correct == True assert answerUser.question.get(pk=question_q.id) == question_q assert answerUser.user.get(pk=user_A.id) == user_A def",
"date = datetime.datetime.now() assert quiz_q.title == \"title\" assert quiz_q.description == \"Long description\" assert",
"@pytest.fixture def questionScore_qs(db, stats_s, question_q): return QuestionScore.objects.create( question=question_q, statistics=stats_s, score=5 ) @pytest.fixture def",
"stats_s.difficult == 5 def test_grade(grade_g, stats_s): assert grade_g.grade == 5 assert grade_g.number ==",
"medium=5, difficult=5 ) @pytest.fixture def grade_g(db, stats_s): return Grade.objects.create(grade=5, number=10, statistics=stats_s) @pytest.fixture def",
"quiz_q.category == category_m assert quiz_q.sub_category == sub_category_n assert isinstance(quiz_q.created, datetime.datetime) assert quiz_q.created.year ==",
"assert grade_g.statistics == stats_s def test_questionScore(stats_s, question_q, questionScore_qs): assert questionScore_qs.question == question_q assert",
"datetime.datetime.now() assert quiz_q.title == \"title\" assert quiz_q.description == \"Long description\" assert quiz_q.creator ==",
"category_name=\"m\", sub_category=sub_category_n, created=date, random_order=False, difficulty=1, ) @pytest.fixture def question_q(db, quiz_q): return Question.objects.create( quiz=quiz_q,",
"answerUser.correct == True assert answerUser.question.get(pk=question_q.id) == question_q assert answerUser.user.get(pk=user_A.id) == user_A def test_statisc(stats_s,",
"str(quiz_q) == \"title\" def test_question(quiz_q, question_q): assert question_q.quiz == quiz_q assert question_q.difficulty ==",
") ### TESTS ### def test_category(category_m): assert isinstance(category_m, Category) assert category_m.category == \"m\"",
"test_question(quiz_q, question_q): assert question_q.quiz == quiz_q assert question_q.difficulty == 1 assert question_q.order ==",
"== quiz_q assert question_q.difficulty == 1 assert question_q.order == 1 assert question_q.figure ==",
"### TESTS ### def test_category(category_m): assert isinstance(category_m, Category) assert category_m.category == \"m\" assert",
"== date.year assert quiz_q.created.month == date.month assert quiz_q.created.day == date.day assert quiz_q.random_order ==",
"ThemeScore, ) import pytest ### FIXTURES ### @pytest.fixture def category_m(db): return Category.objects.create(category=\"m\") @pytest.fixture",
"question_q.difficulty == 1 assert question_q.order == 1 assert question_q.figure == None assert question_q.content",
"\"Long description\" assert quiz_q.creator == user_A assert quiz_q.category == category_m assert quiz_q.sub_category ==",
"\"question\" assert question_q.explanation == None assert question_q.theme1 == \"t1\" assert question_q.theme2 == \"t2\"",
"a.user.add(user_A) return a @pytest.fixture def stats_s(db, quiz_q): return Statistic.objects.create( quiz=quiz_q, number_participants=10, mean=15, easy=5,",
"sub_category_n.sub_category == \"n\" assert sub_category_n.category == category_m assert isinstance(sub_category_n, SubCategory) assert str(sub_category_n) ==",
"User.objects.create_user(username=\"A\") @pytest.fixture def quiz_q(db, category_m, sub_category_n, user_A): date = datetime.datetime.now() return Quiz.objects.create( title=\"title\",",
") @pytest.fixture def question_q(db, quiz_q): return Question.objects.create( quiz=quiz_q, difficulty=1, order=1, figure=None, content=\"question\", explanation=None,",
"assert str(quiz_q) == \"title\" def test_question(quiz_q, question_q): assert question_q.quiz == quiz_q assert question_q.difficulty",
"= datetime.datetime.now() return Quiz.objects.create( title=\"title\", description=\"Long description\", creator=user_A, category=category_m, category_name=\"m\", sub_category=sub_category_n, created=date, random_order=False,",
"== user_A assert quiz_q.category == category_m assert quiz_q.sub_category == sub_category_n assert isinstance(quiz_q.created, datetime.datetime)",
"== \"title\" def test_question(quiz_q, question_q): assert question_q.quiz == quiz_q assert question_q.difficulty == 1",
"question_q, user_A): assert answerUser.correct == True assert answerUser.question.get(pk=question_q.id) == question_q assert answerUser.user.get(pk=user_A.id) ==",
"assert isinstance(quiz_q.created, datetime.datetime) assert quiz_q.created.year == date.year assert quiz_q.created.month == date.month assert quiz_q.created.day",
"category_m(db): return Category.objects.create(category=\"m\") @pytest.fixture def sub_category_n(db, category_m): return SubCategory.objects.create(category=category_m, sub_category=\"n\") @pytest.fixture def user_A(db):",
"number=10, statistics=stats_s) @pytest.fixture def questionScore_qs(db, stats_s, question_q): return QuestionScore.objects.create( question=question_q, statistics=stats_s, score=5 )",
"quiz_q.created.year == date.year assert quiz_q.created.month == date.month assert quiz_q.created.day == date.day assert quiz_q.random_order",
"category_m assert isinstance(sub_category_n, SubCategory) assert str(sub_category_n) == \"n (m)\" def test_quiz(quiz_q, user_A, category_m,",
"category_m assert quiz_q.sub_category == sub_category_n assert isinstance(quiz_q.created, datetime.datetime) assert quiz_q.created.year == date.year assert",
"quiz_q): assert themeScore_ts.theme == \"t1\" assert themeScore_ts.score == 5 assert themeScore_ts.statistics == stats_s",
"(m)\" def test_quiz(quiz_q, user_A, category_m, sub_category_n): date = datetime.datetime.now() assert quiz_q.title == \"title\"",
"assert questionScore_qs.statistics == stats_s assert questionScore_qs.score == 5 def test_themeScore(themeScore_ts, stats_s, quiz_q): assert",
"\"t1\" assert themeScore_ts.score == 5 assert themeScore_ts.statistics == stats_s assert themeScore_ts.quiz == quiz_q",
"== 1 assert question_q.order == 1 assert question_q.figure == None assert question_q.content ==",
"sub_category=\"n\") @pytest.fixture def user_A(db): return User.objects.create_user(username=\"A\") @pytest.fixture def quiz_q(db, category_m, sub_category_n, user_A): date",
"@pytest.fixture def user_A(db): return User.objects.create_user(username=\"A\") @pytest.fixture def quiz_q(db, category_m, sub_category_n, user_A): date =",
"== \"t3\" assert str(question_q) == \"question\" def test_answerUser(answerUser, question_q, user_A): assert answerUser.correct ==",
"\"t2\" assert question_q.theme3 == \"t3\" assert str(question_q) == \"question\" def test_answerUser(answerUser, question_q, user_A):",
"import pytest ### FIXTURES ### @pytest.fixture def category_m(db): return Category.objects.create(category=\"m\") @pytest.fixture def sub_category_n(db,",
"== stats_s assert questionScore_qs.score == 5 def test_themeScore(themeScore_ts, stats_s, quiz_q): assert themeScore_ts.theme ==",
"assert grade_g.number == 10 assert grade_g.statistics == stats_s def test_questionScore(stats_s, question_q, questionScore_qs): assert",
"test_themeScore(themeScore_ts, stats_s, quiz_q): assert themeScore_ts.theme == \"t1\" assert themeScore_ts.score == 5 assert themeScore_ts.statistics",
"return Statistic.objects.create( quiz=quiz_q, number_participants=10, mean=15, easy=5, medium=5, difficult=5 ) @pytest.fixture def grade_g(db, stats_s):",
"== 5 def test_grade(grade_g, stats_s): assert grade_g.grade == 5 assert grade_g.number == 10",
"None assert question_q.theme1 == \"t1\" assert question_q.theme2 == \"t2\" assert question_q.theme3 == \"t3\"",
"def test_answerUser(answerUser, question_q, user_A): assert answerUser.correct == True assert answerUser.question.get(pk=question_q.id) == question_q assert",
"def sub_category_n(db, category_m): return SubCategory.objects.create(category=category_m, sub_category=\"n\") @pytest.fixture def user_A(db): return User.objects.create_user(username=\"A\") @pytest.fixture def",
"assert answerUser.user.get(pk=user_A.id) == user_A def test_statisc(stats_s, quiz_q): assert stats_s.quiz == quiz_q assert stats_s.number_participants",
"== 15 assert stats_s.easy == 5 assert stats_s.medium == 5 assert stats_s.difficult ==",
"from quiz.models import ( AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore,",
"statistics=stats_s) @pytest.fixture def questionScore_qs(db, stats_s, question_q): return QuestionScore.objects.create( question=question_q, statistics=stats_s, score=5 ) @pytest.fixture",
"grade_g.statistics == stats_s def test_questionScore(stats_s, question_q, questionScore_qs): assert questionScore_qs.question == question_q assert questionScore_qs.statistics",
"quiz_q.created.day == date.day assert quiz_q.random_order == False assert quiz_q.difficulty == 1 assert str(quiz_q)",
") @pytest.fixture def themeScore_ts(db, stats_s, quiz_q): return ThemeScore.objects.create( theme=\"t1\", score=5, statistics=stats_s, quiz=quiz_q )",
"quiz_q.creator == user_A assert quiz_q.category == category_m assert quiz_q.sub_category == sub_category_n assert isinstance(quiz_q.created,",
"question_q): assert question_q.quiz == quiz_q assert question_q.difficulty == 1 assert question_q.order == 1",
"quiz_q): assert stats_s.quiz == quiz_q assert stats_s.number_participants == 10 assert stats_s.mean == 15",
"grade_g(db, stats_s): return Grade.objects.create(grade=5, number=10, statistics=stats_s) @pytest.fixture def questionScore_qs(db, stats_s, question_q): return QuestionScore.objects.create(",
"return User.objects.create_user(username=\"A\") @pytest.fixture def quiz_q(db, category_m, sub_category_n, user_A): date = datetime.datetime.now() return Quiz.objects.create(",
"== \"m\" assert str(category_m) == \"m\" def test_sub_category(category_m, sub_category_n): assert sub_category_n.sub_category == \"n\"",
") import pytest ### FIXTURES ### @pytest.fixture def category_m(db): return Category.objects.create(category=\"m\") @pytest.fixture def",
"== date.month assert quiz_q.created.day == date.day assert quiz_q.random_order == False assert quiz_q.difficulty ==",
"1 assert question_q.order == 1 assert question_q.figure == None assert question_q.content == \"question\"",
"assert str(question_q) == \"question\" def test_answerUser(answerUser, question_q, user_A): assert answerUser.correct == True assert",
"def test_statisc(stats_s, quiz_q): assert stats_s.quiz == quiz_q assert stats_s.number_participants == 10 assert stats_s.mean",
"category_m): return SubCategory.objects.create(category=category_m, sub_category=\"n\") @pytest.fixture def user_A(db): return User.objects.create_user(username=\"A\") @pytest.fixture def quiz_q(db, category_m,",
"import datetime from django.contrib.auth.models import User from quiz.models import ( AnswerUser, Category, Grade,",
"question_q.figure == None assert question_q.content == \"question\" assert question_q.explanation == None assert question_q.theme1",
"TESTS ### def test_category(category_m): assert isinstance(category_m, Category) assert category_m.category == \"m\" assert str(category_m)",
"assert category_m.category == \"m\" assert str(category_m) == \"m\" def test_sub_category(category_m, sub_category_n): assert sub_category_n.sub_category",
"stats_s, quiz_q): return ThemeScore.objects.create( theme=\"t1\", score=5, statistics=stats_s, quiz=quiz_q ) ### TESTS ### def",
"\"m\" def test_sub_category(category_m, sub_category_n): assert sub_category_n.sub_category == \"n\" assert sub_category_n.category == category_m assert",
"creator=user_A, category=category_m, category_name=\"m\", sub_category=sub_category_n, created=date, random_order=False, difficulty=1, ) @pytest.fixture def question_q(db, quiz_q): return",
"== 10 assert stats_s.mean == 15 assert stats_s.easy == 5 assert stats_s.medium ==",
"quiz_q.random_order == False assert quiz_q.difficulty == 1 assert str(quiz_q) == \"title\" def test_question(quiz_q,",
"assert stats_s.number_participants == 10 assert stats_s.mean == 15 assert stats_s.easy == 5 assert",
"sub_category_n): assert sub_category_n.sub_category == \"n\" assert sub_category_n.category == category_m assert isinstance(sub_category_n, SubCategory) assert",
"SubCategory, ThemeScore, ) import pytest ### FIXTURES ### @pytest.fixture def category_m(db): return Category.objects.create(category=\"m\")",
"= AnswerUser.objects.create(correct=True) a.save() a.question.add(question_q) a.user.add(user_A) return a @pytest.fixture def stats_s(db, quiz_q): return Statistic.objects.create(",
"== 10 assert grade_g.statistics == stats_s def test_questionScore(stats_s, question_q, questionScore_qs): assert questionScore_qs.question ==",
"answerUser.user.get(pk=user_A.id) == user_A def test_statisc(stats_s, quiz_q): assert stats_s.quiz == quiz_q assert stats_s.number_participants ==",
"return Quiz.objects.create( title=\"title\", description=\"Long description\", creator=user_A, category=category_m, category_name=\"m\", sub_category=sub_category_n, created=date, random_order=False, difficulty=1, )",
"theme2=\"t2\", theme3=\"t3\", ) @pytest.fixture def answerUser(db, question_q, user_A): a = AnswerUser.objects.create(correct=True) a.save() a.question.add(question_q)",
"stats_s(db, quiz_q): return Statistic.objects.create( quiz=quiz_q, number_participants=10, mean=15, easy=5, medium=5, difficult=5 ) @pytest.fixture def",
"== 5 assert grade_g.number == 10 assert grade_g.statistics == stats_s def test_questionScore(stats_s, question_q,",
"stats_s, question_q): return QuestionScore.objects.create( question=question_q, statistics=stats_s, score=5 ) @pytest.fixture def themeScore_ts(db, stats_s, quiz_q):",
"False assert quiz_q.difficulty == 1 assert str(quiz_q) == \"title\" def test_question(quiz_q, question_q): assert",
"assert quiz_q.creator == user_A assert quiz_q.category == category_m assert quiz_q.sub_category == sub_category_n assert",
"quiz=quiz_q ) ### TESTS ### def test_category(category_m): assert isinstance(category_m, Category) assert category_m.category ==",
"quiz_q.sub_category == sub_category_n assert isinstance(quiz_q.created, datetime.datetime) assert quiz_q.created.year == date.year assert quiz_q.created.month ==",
"score=5, statistics=stats_s, quiz=quiz_q ) ### TESTS ### def test_category(category_m): assert isinstance(category_m, Category) assert",
"quiz_q.difficulty == 1 assert str(quiz_q) == \"title\" def test_question(quiz_q, question_q): assert question_q.quiz ==",
"description=\"Long description\", creator=user_A, category=category_m, category_name=\"m\", sub_category=sub_category_n, created=date, random_order=False, difficulty=1, ) @pytest.fixture def question_q(db,",
"Category.objects.create(category=\"m\") @pytest.fixture def sub_category_n(db, category_m): return SubCategory.objects.create(category=category_m, sub_category=\"n\") @pytest.fixture def user_A(db): return User.objects.create_user(username=\"A\")",
"str(category_m) == \"m\" def test_sub_category(category_m, sub_category_n): assert sub_category_n.sub_category == \"n\" assert sub_category_n.category ==",
"stats_s.medium == 5 assert stats_s.difficult == 5 def test_grade(grade_g, stats_s): assert grade_g.grade ==",
"== \"t1\" assert themeScore_ts.score == 5 assert themeScore_ts.statistics == stats_s assert themeScore_ts.quiz ==",
"return Grade.objects.create(grade=5, number=10, statistics=stats_s) @pytest.fixture def questionScore_qs(db, stats_s, question_q): return QuestionScore.objects.create( question=question_q, statistics=stats_s,",
"assert isinstance(category_m, Category) assert category_m.category == \"m\" assert str(category_m) == \"m\" def test_sub_category(category_m,",
"questionScore_qs): assert questionScore_qs.question == question_q assert questionScore_qs.statistics == stats_s assert questionScore_qs.score == 5",
"@pytest.fixture def grade_g(db, stats_s): return Grade.objects.create(grade=5, number=10, statistics=stats_s) @pytest.fixture def questionScore_qs(db, stats_s, question_q):",
"SubCategory) assert str(sub_category_n) == \"n (m)\" def test_quiz(quiz_q, user_A, category_m, sub_category_n): date =",
"Quiz.objects.create( title=\"title\", description=\"Long description\", creator=user_A, category=category_m, category_name=\"m\", sub_category=sub_category_n, created=date, random_order=False, difficulty=1, ) @pytest.fixture",
"== \"n\" assert sub_category_n.category == category_m assert isinstance(sub_category_n, SubCategory) assert str(sub_category_n) == \"n",
"score=5 ) @pytest.fixture def themeScore_ts(db, stats_s, quiz_q): return ThemeScore.objects.create( theme=\"t1\", score=5, statistics=stats_s, quiz=quiz_q",
"assert quiz_q.category == category_m assert quiz_q.sub_category == sub_category_n assert isinstance(quiz_q.created, datetime.datetime) assert quiz_q.created.year",
"### @pytest.fixture def category_m(db): return Category.objects.create(category=\"m\") @pytest.fixture def sub_category_n(db, category_m): return SubCategory.objects.create(category=category_m, sub_category=\"n\")",
"sub_category_n, user_A): date = datetime.datetime.now() return Quiz.objects.create( title=\"title\", description=\"Long description\", creator=user_A, category=category_m, category_name=\"m\",",
"stats_s.mean == 15 assert stats_s.easy == 5 assert stats_s.medium == 5 assert stats_s.difficult",
"assert quiz_q.difficulty == 1 assert str(quiz_q) == \"title\" def test_question(quiz_q, question_q): assert question_q.quiz",
"datetime from django.contrib.auth.models import User from quiz.models import ( AnswerUser, Category, Grade, Question,",
"True assert answerUser.question.get(pk=question_q.id) == question_q assert answerUser.user.get(pk=user_A.id) == user_A def test_statisc(stats_s, quiz_q): assert",
"question_q, user_A): a = AnswerUser.objects.create(correct=True) a.save() a.question.add(question_q) a.user.add(user_A) return a @pytest.fixture def stats_s(db,",
"assert stats_s.easy == 5 assert stats_s.medium == 5 assert stats_s.difficult == 5 def",
"== user_A def test_statisc(stats_s, quiz_q): assert stats_s.quiz == quiz_q assert stats_s.number_participants == 10",
"def user_A(db): return User.objects.create_user(username=\"A\") @pytest.fixture def quiz_q(db, category_m, sub_category_n, user_A): date = datetime.datetime.now()",
"assert question_q.order == 1 assert question_q.figure == None assert question_q.content == \"question\" assert",
"assert stats_s.mean == 15 assert stats_s.easy == 5 assert stats_s.medium == 5 assert",
"assert stats_s.quiz == quiz_q assert stats_s.number_participants == 10 assert stats_s.mean == 15 assert",
"QuestionScore, Quiz, Statistic, SubCategory, ThemeScore, ) import pytest ### FIXTURES ### @pytest.fixture def",
"question_q.theme3 == \"t3\" assert str(question_q) == \"question\" def test_answerUser(answerUser, question_q, user_A): assert answerUser.correct",
"date.year assert quiz_q.created.month == date.month assert quiz_q.created.day == date.day assert quiz_q.random_order == False",
"None assert question_q.content == \"question\" assert question_q.explanation == None assert question_q.theme1 == \"t1\"",
"category_m.category == \"m\" assert str(category_m) == \"m\" def test_sub_category(category_m, sub_category_n): assert sub_category_n.sub_category ==",
"difficult=5 ) @pytest.fixture def grade_g(db, stats_s): return Grade.objects.create(grade=5, number=10, statistics=stats_s) @pytest.fixture def questionScore_qs(db,",
"\"n (m)\" def test_quiz(quiz_q, user_A, category_m, sub_category_n): date = datetime.datetime.now() assert quiz_q.title ==",
"== \"n (m)\" def test_quiz(quiz_q, user_A, category_m, sub_category_n): date = datetime.datetime.now() assert quiz_q.title",
"== quiz_q assert stats_s.number_participants == 10 assert stats_s.mean == 15 assert stats_s.easy ==",
"category_m, sub_category_n, user_A): date = datetime.datetime.now() return Quiz.objects.create( title=\"title\", description=\"Long description\", creator=user_A, category=category_m,",
"test_questionScore(stats_s, question_q, questionScore_qs): assert questionScore_qs.question == question_q assert questionScore_qs.statistics == stats_s assert questionScore_qs.score",
"== 5 assert stats_s.difficult == 5 def test_grade(grade_g, stats_s): assert grade_g.grade == 5",
"Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore, ) import pytest ### FIXTURES",
"Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore, ) import pytest ### FIXTURES ###",
"questionScore_qs.score == 5 def test_themeScore(themeScore_ts, stats_s, quiz_q): assert themeScore_ts.theme == \"t1\" assert themeScore_ts.score",
"answerUser(db, question_q, user_A): a = AnswerUser.objects.create(correct=True) a.save() a.question.add(question_q) a.user.add(user_A) return a @pytest.fixture def",
"QuestionScore.objects.create( question=question_q, statistics=stats_s, score=5 ) @pytest.fixture def themeScore_ts(db, stats_s, quiz_q): return ThemeScore.objects.create( theme=\"t1\",",
"def test_questionScore(stats_s, question_q, questionScore_qs): assert questionScore_qs.question == question_q assert questionScore_qs.statistics == stats_s assert",
"== question_q assert questionScore_qs.statistics == stats_s assert questionScore_qs.score == 5 def test_themeScore(themeScore_ts, stats_s,",
"figure=None, content=\"question\", explanation=None, theme1=\"t1\", theme2=\"t2\", theme3=\"t3\", ) @pytest.fixture def answerUser(db, question_q, user_A): a",
"Statistic.objects.create( quiz=quiz_q, number_participants=10, mean=15, easy=5, medium=5, difficult=5 ) @pytest.fixture def grade_g(db, stats_s): return",
") @pytest.fixture def grade_g(db, stats_s): return Grade.objects.create(grade=5, number=10, statistics=stats_s) @pytest.fixture def questionScore_qs(db, stats_s,",
"str(question_q) == \"question\" def test_answerUser(answerUser, question_q, user_A): assert answerUser.correct == True assert answerUser.question.get(pk=question_q.id)",
"assert question_q.difficulty == 1 assert question_q.order == 1 assert question_q.figure == None assert",
"category_m, sub_category_n): date = datetime.datetime.now() assert quiz_q.title == \"title\" assert quiz_q.description == \"Long",
"isinstance(category_m, Category) assert category_m.category == \"m\" assert str(category_m) == \"m\" def test_sub_category(category_m, sub_category_n):",
"quiz=quiz_q, number_participants=10, mean=15, easy=5, medium=5, difficult=5 ) @pytest.fixture def grade_g(db, stats_s): return Grade.objects.create(grade=5,",
"question_q assert answerUser.user.get(pk=user_A.id) == user_A def test_statisc(stats_s, quiz_q): assert stats_s.quiz == quiz_q assert",
"\"title\" assert quiz_q.description == \"Long description\" assert quiz_q.creator == user_A assert quiz_q.category ==",
"AnswerUser.objects.create(correct=True) a.save() a.question.add(question_q) a.user.add(user_A) return a @pytest.fixture def stats_s(db, quiz_q): return Statistic.objects.create( quiz=quiz_q,",
"assert question_q.theme1 == \"t1\" assert question_q.theme2 == \"t2\" assert question_q.theme3 == \"t3\" assert",
"quiz_q assert stats_s.number_participants == 10 assert stats_s.mean == 15 assert stats_s.easy == 5",
"a.question.add(question_q) a.user.add(user_A) return a @pytest.fixture def stats_s(db, quiz_q): return Statistic.objects.create( quiz=quiz_q, number_participants=10, mean=15,",
"10 assert grade_g.statistics == stats_s def test_questionScore(stats_s, question_q, questionScore_qs): assert questionScore_qs.question == question_q",
"quiz.models import ( AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore, )",
"question_q.content == \"question\" assert question_q.explanation == None assert question_q.theme1 == \"t1\" assert question_q.theme2",
"datetime.datetime.now() return Quiz.objects.create( title=\"title\", description=\"Long description\", creator=user_A, category=category_m, category_name=\"m\", sub_category=sub_category_n, created=date, random_order=False, difficulty=1,",
"ThemeScore.objects.create( theme=\"t1\", score=5, statistics=stats_s, quiz=quiz_q ) ### TESTS ### def test_category(category_m): assert isinstance(category_m,",
"theme=\"t1\", score=5, statistics=stats_s, quiz=quiz_q ) ### TESTS ### def test_category(category_m): assert isinstance(category_m, Category)",
"= datetime.datetime.now() assert quiz_q.title == \"title\" assert quiz_q.description == \"Long description\" assert quiz_q.creator",
"question_q.quiz == quiz_q assert question_q.difficulty == 1 assert question_q.order == 1 assert question_q.figure",
"a @pytest.fixture def stats_s(db, quiz_q): return Statistic.objects.create( quiz=quiz_q, number_participants=10, mean=15, easy=5, medium=5, difficult=5",
"== sub_category_n assert isinstance(quiz_q.created, datetime.datetime) assert quiz_q.created.year == date.year assert quiz_q.created.month == date.month",
"@pytest.fixture def question_q(db, quiz_q): return Question.objects.create( quiz=quiz_q, difficulty=1, order=1, figure=None, content=\"question\", explanation=None, theme1=\"t1\",",
"import ( AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore, ) import",
"sub_category_n(db, category_m): return SubCategory.objects.create(category=category_m, sub_category=\"n\") @pytest.fixture def user_A(db): return User.objects.create_user(username=\"A\") @pytest.fixture def quiz_q(db,",
"10 assert stats_s.mean == 15 assert stats_s.easy == 5 assert stats_s.medium == 5",
"FIXTURES ### @pytest.fixture def category_m(db): return Category.objects.create(category=\"m\") @pytest.fixture def sub_category_n(db, category_m): return SubCategory.objects.create(category=category_m,",
"return a @pytest.fixture def stats_s(db, quiz_q): return Statistic.objects.create( quiz=quiz_q, number_participants=10, mean=15, easy=5, medium=5,",
"assert sub_category_n.category == category_m assert isinstance(sub_category_n, SubCategory) assert str(sub_category_n) == \"n (m)\" def",
"description\", creator=user_A, category=category_m, category_name=\"m\", sub_category=sub_category_n, created=date, random_order=False, difficulty=1, ) @pytest.fixture def question_q(db, quiz_q):",
"stats_s def test_questionScore(stats_s, question_q, questionScore_qs): assert questionScore_qs.question == question_q assert questionScore_qs.statistics == stats_s",
"title=\"title\", description=\"Long description\", creator=user_A, category=category_m, category_name=\"m\", sub_category=sub_category_n, created=date, random_order=False, difficulty=1, ) @pytest.fixture def",
"sub_category_n): date = datetime.datetime.now() assert quiz_q.title == \"title\" assert quiz_q.description == \"Long description\"",
"assert question_q.figure == None assert question_q.content == \"question\" assert question_q.explanation == None assert",
"== \"t1\" assert question_q.theme2 == \"t2\" assert question_q.theme3 == \"t3\" assert str(question_q) ==",
"Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore, ) import pytest ### FIXTURES ### @pytest.fixture",
"user_A assert quiz_q.category == category_m assert quiz_q.sub_category == sub_category_n assert isinstance(quiz_q.created, datetime.datetime) assert",
"test_sub_category(category_m, sub_category_n): assert sub_category_n.sub_category == \"n\" assert sub_category_n.category == category_m assert isinstance(sub_category_n, SubCategory)",
"order=1, figure=None, content=\"question\", explanation=None, theme1=\"t1\", theme2=\"t2\", theme3=\"t3\", ) @pytest.fixture def answerUser(db, question_q, user_A):",
"assert quiz_q.created.year == date.year assert quiz_q.created.month == date.month assert quiz_q.created.day == date.day assert",
"from django.contrib.auth.models import User from quiz.models import ( AnswerUser, Category, Grade, Question, QuestionScore,",
"@pytest.fixture def stats_s(db, quiz_q): return Statistic.objects.create( quiz=quiz_q, number_participants=10, mean=15, easy=5, medium=5, difficult=5 )",
"== \"m\" def test_sub_category(category_m, sub_category_n): assert sub_category_n.sub_category == \"n\" assert sub_category_n.category == category_m",
") @pytest.fixture def answerUser(db, question_q, user_A): a = AnswerUser.objects.create(correct=True) a.save() a.question.add(question_q) a.user.add(user_A) return",
"user_A): assert answerUser.correct == True assert answerUser.question.get(pk=question_q.id) == question_q assert answerUser.user.get(pk=user_A.id) == user_A",
"def test_sub_category(category_m, sub_category_n): assert sub_category_n.sub_category == \"n\" assert sub_category_n.category == category_m assert isinstance(sub_category_n,",
"assert str(sub_category_n) == \"n (m)\" def test_quiz(quiz_q, user_A, category_m, sub_category_n): date = datetime.datetime.now()",
"quiz_q.description == \"Long description\" assert quiz_q.creator == user_A assert quiz_q.category == category_m assert",
"stats_s assert questionScore_qs.score == 5 def test_themeScore(themeScore_ts, stats_s, quiz_q): assert themeScore_ts.theme == \"t1\"",
"== category_m assert isinstance(sub_category_n, SubCategory) assert str(sub_category_n) == \"n (m)\" def test_quiz(quiz_q, user_A,",
"category=category_m, category_name=\"m\", sub_category=sub_category_n, created=date, random_order=False, difficulty=1, ) @pytest.fixture def question_q(db, quiz_q): return Question.objects.create(",
"User from quiz.models import ( AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory,",
"def category_m(db): return Category.objects.create(category=\"m\") @pytest.fixture def sub_category_n(db, category_m): return SubCategory.objects.create(category=category_m, sub_category=\"n\") @pytest.fixture def",
"5 def test_themeScore(themeScore_ts, stats_s, quiz_q): assert themeScore_ts.theme == \"t1\" assert themeScore_ts.score == 5",
"difficulty=1, ) @pytest.fixture def question_q(db, quiz_q): return Question.objects.create( quiz=quiz_q, difficulty=1, order=1, figure=None, content=\"question\",",
"str(sub_category_n) == \"n (m)\" def test_quiz(quiz_q, user_A, category_m, sub_category_n): date = datetime.datetime.now() assert",
"== None assert question_q.theme1 == \"t1\" assert question_q.theme2 == \"t2\" assert question_q.theme3 ==",
"django.contrib.auth.models import User from quiz.models import ( AnswerUser, Category, Grade, Question, QuestionScore, Quiz,",
"themeScore_ts.theme == \"t1\" assert themeScore_ts.score == 5 assert themeScore_ts.statistics == stats_s assert themeScore_ts.quiz",
"theme3=\"t3\", ) @pytest.fixture def answerUser(db, question_q, user_A): a = AnswerUser.objects.create(correct=True) a.save() a.question.add(question_q) a.user.add(user_A)",
"assert quiz_q.created.day == date.day assert quiz_q.random_order == False assert quiz_q.difficulty == 1 assert",
"== stats_s def test_questionScore(stats_s, question_q, questionScore_qs): assert questionScore_qs.question == question_q assert questionScore_qs.statistics ==",
"== 5 def test_themeScore(themeScore_ts, stats_s, quiz_q): assert themeScore_ts.theme == \"t1\" assert themeScore_ts.score ==",
"stats_s): return Grade.objects.create(grade=5, number=10, statistics=stats_s) @pytest.fixture def questionScore_qs(db, stats_s, question_q): return QuestionScore.objects.create( question=question_q,",
"def test_category(category_m): assert isinstance(category_m, Category) assert category_m.category == \"m\" assert str(category_m) == \"m\"",
"user_A, category_m, sub_category_n): date = datetime.datetime.now() assert quiz_q.title == \"title\" assert quiz_q.description ==",
"== \"t2\" assert question_q.theme3 == \"t3\" assert str(question_q) == \"question\" def test_answerUser(answerUser, question_q,",
"def test_question(quiz_q, question_q): assert question_q.quiz == quiz_q assert question_q.difficulty == 1 assert question_q.order",
"statistics=stats_s, quiz=quiz_q ) ### TESTS ### def test_category(category_m): assert isinstance(category_m, Category) assert category_m.category",
"@pytest.fixture def answerUser(db, question_q, user_A): a = AnswerUser.objects.create(correct=True) a.save() a.question.add(question_q) a.user.add(user_A) return a",
"\"title\" def test_question(quiz_q, question_q): assert question_q.quiz == quiz_q assert question_q.difficulty == 1 assert",
"grade_g.number == 10 assert grade_g.statistics == stats_s def test_questionScore(stats_s, question_q, questionScore_qs): assert questionScore_qs.question",
"@pytest.fixture def category_m(db): return Category.objects.create(category=\"m\") @pytest.fixture def sub_category_n(db, category_m): return SubCategory.objects.create(category=category_m, sub_category=\"n\") @pytest.fixture",
"answerUser.question.get(pk=question_q.id) == question_q assert answerUser.user.get(pk=user_A.id) == user_A def test_statisc(stats_s, quiz_q): assert stats_s.quiz ==",
"date = datetime.datetime.now() return Quiz.objects.create( title=\"title\", description=\"Long description\", creator=user_A, category=category_m, category_name=\"m\", sub_category=sub_category_n, created=date,",
"\"n\" assert sub_category_n.category == category_m assert isinstance(sub_category_n, SubCategory) assert str(sub_category_n) == \"n (m)\"",
"question_q(db, quiz_q): return Question.objects.create( quiz=quiz_q, difficulty=1, order=1, figure=None, content=\"question\", explanation=None, theme1=\"t1\", theme2=\"t2\", theme3=\"t3\",",
"== 1 assert question_q.figure == None assert question_q.content == \"question\" assert question_q.explanation ==",
"test_answerUser(answerUser, question_q, user_A): assert answerUser.correct == True assert answerUser.question.get(pk=question_q.id) == question_q assert answerUser.user.get(pk=user_A.id)",
"user_A): date = datetime.datetime.now() return Quiz.objects.create( title=\"title\", description=\"Long description\", creator=user_A, category=category_m, category_name=\"m\", sub_category=sub_category_n,",
"description\" assert quiz_q.creator == user_A assert quiz_q.category == category_m assert quiz_q.sub_category == sub_category_n",
"assert grade_g.grade == 5 assert grade_g.number == 10 assert grade_g.statistics == stats_s def",
"test_grade(grade_g, stats_s): assert grade_g.grade == 5 assert grade_g.number == 10 assert grade_g.statistics ==",
"isinstance(quiz_q.created, datetime.datetime) assert quiz_q.created.year == date.year assert quiz_q.created.month == date.month assert quiz_q.created.day ==",
"question=question_q, statistics=stats_s, score=5 ) @pytest.fixture def themeScore_ts(db, stats_s, quiz_q): return ThemeScore.objects.create( theme=\"t1\", score=5,",
"assert question_q.theme2 == \"t2\" assert question_q.theme3 == \"t3\" assert str(question_q) == \"question\" def",
"themeScore_ts(db, stats_s, quiz_q): return ThemeScore.objects.create( theme=\"t1\", score=5, statistics=stats_s, quiz=quiz_q ) ### TESTS ###",
"def questionScore_qs(db, stats_s, question_q): return QuestionScore.objects.create( question=question_q, statistics=stats_s, score=5 ) @pytest.fixture def themeScore_ts(db,",
"== \"title\" assert quiz_q.description == \"Long description\" assert quiz_q.creator == user_A assert quiz_q.category",
"5 assert stats_s.medium == 5 assert stats_s.difficult == 5 def test_grade(grade_g, stats_s): assert",
"pytest ### FIXTURES ### @pytest.fixture def category_m(db): return Category.objects.create(category=\"m\") @pytest.fixture def sub_category_n(db, category_m):",
"5 assert stats_s.difficult == 5 def test_grade(grade_g, stats_s): assert grade_g.grade == 5 assert",
"== date.day assert quiz_q.random_order == False assert quiz_q.difficulty == 1 assert str(quiz_q) ==",
"test_category(category_m): assert isinstance(category_m, Category) assert category_m.category == \"m\" assert str(category_m) == \"m\" def",
"return ThemeScore.objects.create( theme=\"t1\", score=5, statistics=stats_s, quiz=quiz_q ) ### TESTS ### def test_category(category_m): assert",
"Statistic, SubCategory, ThemeScore, ) import pytest ### FIXTURES ### @pytest.fixture def category_m(db): return",
"== False assert quiz_q.difficulty == 1 assert str(quiz_q) == \"title\" def test_question(quiz_q, question_q):",
"quiz_q): return Question.objects.create( quiz=quiz_q, difficulty=1, order=1, figure=None, content=\"question\", explanation=None, theme1=\"t1\", theme2=\"t2\", theme3=\"t3\", )",
"test_statisc(stats_s, quiz_q): assert stats_s.quiz == quiz_q assert stats_s.number_participants == 10 assert stats_s.mean ==",
"datetime.datetime) assert quiz_q.created.year == date.year assert quiz_q.created.month == date.month assert quiz_q.created.day == date.day",
"stats_s.easy == 5 assert stats_s.medium == 5 assert stats_s.difficult == 5 def test_grade(grade_g,",
"SubCategory.objects.create(category=category_m, sub_category=\"n\") @pytest.fixture def user_A(db): return User.objects.create_user(username=\"A\") @pytest.fixture def quiz_q(db, category_m, sub_category_n, user_A):",
"def themeScore_ts(db, stats_s, quiz_q): return ThemeScore.objects.create( theme=\"t1\", score=5, statistics=stats_s, quiz=quiz_q ) ### TESTS",
"date.day assert quiz_q.random_order == False assert quiz_q.difficulty == 1 assert str(quiz_q) == \"title\"",
"\"t1\" assert question_q.theme2 == \"t2\" assert question_q.theme3 == \"t3\" assert str(question_q) == \"question\"",
"test_quiz(quiz_q, user_A, category_m, sub_category_n): date = datetime.datetime.now() assert quiz_q.title == \"title\" assert quiz_q.description",
"assert quiz_q.random_order == False assert quiz_q.difficulty == 1 assert str(quiz_q) == \"title\" def",
"== category_m assert quiz_q.sub_category == sub_category_n assert isinstance(quiz_q.created, datetime.datetime) assert quiz_q.created.year == date.year",
"stats_s): assert grade_g.grade == 5 assert grade_g.number == 10 assert grade_g.statistics == stats_s",
"Category) assert category_m.category == \"m\" assert str(category_m) == \"m\" def test_sub_category(category_m, sub_category_n): assert",
"def test_themeScore(themeScore_ts, stats_s, quiz_q): assert themeScore_ts.theme == \"t1\" assert themeScore_ts.score == 5 assert",
"Quiz, Statistic, SubCategory, ThemeScore, ) import pytest ### FIXTURES ### @pytest.fixture def category_m(db):",
"theme1=\"t1\", theme2=\"t2\", theme3=\"t3\", ) @pytest.fixture def answerUser(db, question_q, user_A): a = AnswerUser.objects.create(correct=True) a.save()",
"1 assert question_q.figure == None assert question_q.content == \"question\" assert question_q.explanation == None",
"quiz_q(db, category_m, sub_category_n, user_A): date = datetime.datetime.now() return Quiz.objects.create( title=\"title\", description=\"Long description\", creator=user_A,",
"question_q.explanation == None assert question_q.theme1 == \"t1\" assert question_q.theme2 == \"t2\" assert question_q.theme3",
"mean=15, easy=5, medium=5, difficult=5 ) @pytest.fixture def grade_g(db, stats_s): return Grade.objects.create(grade=5, number=10, statistics=stats_s)",
"assert quiz_q.title == \"title\" assert quiz_q.description == \"Long description\" assert quiz_q.creator == user_A",
"( AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore, ) import pytest",
"isinstance(sub_category_n, SubCategory) assert str(sub_category_n) == \"n (m)\" def test_quiz(quiz_q, user_A, category_m, sub_category_n): date",
"AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic, SubCategory, ThemeScore, ) import pytest ###",
"quiz_q.created.month == date.month assert quiz_q.created.day == date.day assert quiz_q.random_order == False assert quiz_q.difficulty",
"quiz_q): return Statistic.objects.create( quiz=quiz_q, number_participants=10, mean=15, easy=5, medium=5, difficult=5 ) @pytest.fixture def grade_g(db,",
"questionScore_qs(db, stats_s, question_q): return QuestionScore.objects.create( question=question_q, statistics=stats_s, score=5 ) @pytest.fixture def themeScore_ts(db, stats_s,",
"user_A(db): return User.objects.create_user(username=\"A\") @pytest.fixture def quiz_q(db, category_m, sub_category_n, user_A): date = datetime.datetime.now() return",
"return SubCategory.objects.create(category=category_m, sub_category=\"n\") @pytest.fixture def user_A(db): return User.objects.create_user(username=\"A\") @pytest.fixture def quiz_q(db, category_m, sub_category_n,",
"explanation=None, theme1=\"t1\", theme2=\"t2\", theme3=\"t3\", ) @pytest.fixture def answerUser(db, question_q, user_A): a = AnswerUser.objects.create(correct=True)",
"5 def test_grade(grade_g, stats_s): assert grade_g.grade == 5 assert grade_g.number == 10 assert",
"== \"Long description\" assert quiz_q.creator == user_A assert quiz_q.category == category_m assert quiz_q.sub_category",
"assert question_q.explanation == None assert question_q.theme1 == \"t1\" assert question_q.theme2 == \"t2\" assert",
"import User from quiz.models import ( AnswerUser, Category, Grade, Question, QuestionScore, Quiz, Statistic,",
"def stats_s(db, quiz_q): return Statistic.objects.create( quiz=quiz_q, number_participants=10, mean=15, easy=5, medium=5, difficult=5 ) @pytest.fixture",
"### def test_category(category_m): assert isinstance(category_m, Category) assert category_m.category == \"m\" assert str(category_m) ==",
"def test_quiz(quiz_q, user_A, category_m, sub_category_n): date = datetime.datetime.now() assert quiz_q.title == \"title\" assert",
"def quiz_q(db, category_m, sub_category_n, user_A): date = datetime.datetime.now() return Quiz.objects.create( title=\"title\", description=\"Long description\",",
"sub_category_n.category == category_m assert isinstance(sub_category_n, SubCategory) assert str(sub_category_n) == \"n (m)\" def test_quiz(quiz_q,",
"assert str(category_m) == \"m\" def test_sub_category(category_m, sub_category_n): assert sub_category_n.sub_category == \"n\" assert sub_category_n.category",
"sub_category=sub_category_n, created=date, random_order=False, difficulty=1, ) @pytest.fixture def question_q(db, quiz_q): return Question.objects.create( quiz=quiz_q, difficulty=1,",
"### FIXTURES ### @pytest.fixture def category_m(db): return Category.objects.create(category=\"m\") @pytest.fixture def sub_category_n(db, category_m): return",
"stats_s, quiz_q): assert themeScore_ts.theme == \"t1\" assert themeScore_ts.score == 5 assert themeScore_ts.statistics ==",
"number_participants=10, mean=15, easy=5, medium=5, difficult=5 ) @pytest.fixture def grade_g(db, stats_s): return Grade.objects.create(grade=5, number=10,",
"5 assert grade_g.number == 10 assert grade_g.statistics == stats_s def test_questionScore(stats_s, question_q, questionScore_qs):",
"a.save() a.question.add(question_q) a.user.add(user_A) return a @pytest.fixture def stats_s(db, quiz_q): return Statistic.objects.create( quiz=quiz_q, number_participants=10,",
"assert quiz_q.created.month == date.month assert quiz_q.created.day == date.day assert quiz_q.random_order == False assert",
"difficulty=1, order=1, figure=None, content=\"question\", explanation=None, theme1=\"t1\", theme2=\"t2\", theme3=\"t3\", ) @pytest.fixture def answerUser(db, question_q,",
"== True assert answerUser.question.get(pk=question_q.id) == question_q assert answerUser.user.get(pk=user_A.id) == user_A def test_statisc(stats_s, quiz_q):",
"created=date, random_order=False, difficulty=1, ) @pytest.fixture def question_q(db, quiz_q): return Question.objects.create( quiz=quiz_q, difficulty=1, order=1,",
"@pytest.fixture def quiz_q(db, category_m, sub_category_n, user_A): date = datetime.datetime.now() return Quiz.objects.create( title=\"title\", description=\"Long",
"return Question.objects.create( quiz=quiz_q, difficulty=1, order=1, figure=None, content=\"question\", explanation=None, theme1=\"t1\", theme2=\"t2\", theme3=\"t3\", ) @pytest.fixture",
"question_q assert questionScore_qs.statistics == stats_s assert questionScore_qs.score == 5 def test_themeScore(themeScore_ts, stats_s, quiz_q):",
"\"t3\" assert str(question_q) == \"question\" def test_answerUser(answerUser, question_q, user_A): assert answerUser.correct == True",
"@pytest.fixture def sub_category_n(db, category_m): return SubCategory.objects.create(category=category_m, sub_category=\"n\") @pytest.fixture def user_A(db): return User.objects.create_user(username=\"A\") @pytest.fixture",
"@pytest.fixture def themeScore_ts(db, stats_s, quiz_q): return ThemeScore.objects.create( theme=\"t1\", score=5, statistics=stats_s, quiz=quiz_q ) ###",
"assert question_q.quiz == quiz_q assert question_q.difficulty == 1 assert question_q.order == 1 assert",
"Question.objects.create( quiz=quiz_q, difficulty=1, order=1, figure=None, content=\"question\", explanation=None, theme1=\"t1\", theme2=\"t2\", theme3=\"t3\", ) @pytest.fixture def",
"assert answerUser.question.get(pk=question_q.id) == question_q assert answerUser.user.get(pk=user_A.id) == user_A def test_statisc(stats_s, quiz_q): assert stats_s.quiz",
"stats_s.quiz == quiz_q assert stats_s.number_participants == 10 assert stats_s.mean == 15 assert stats_s.easy",
"assert stats_s.medium == 5 assert stats_s.difficult == 5 def test_grade(grade_g, stats_s): assert grade_g.grade",
"assert themeScore_ts.theme == \"t1\" assert themeScore_ts.score == 5 assert themeScore_ts.statistics == stats_s assert",
"== None assert question_q.content == \"question\" assert question_q.explanation == None assert question_q.theme1 ==",
"user_A): a = AnswerUser.objects.create(correct=True) a.save() a.question.add(question_q) a.user.add(user_A) return a @pytest.fixture def stats_s(db, quiz_q):",
"== \"question\" assert question_q.explanation == None assert question_q.theme1 == \"t1\" assert question_q.theme2 ==",
"def grade_g(db, stats_s): return Grade.objects.create(grade=5, number=10, statistics=stats_s) @pytest.fixture def questionScore_qs(db, stats_s, question_q): return",
"questionScore_qs.question == question_q assert questionScore_qs.statistics == stats_s assert questionScore_qs.score == 5 def test_themeScore(themeScore_ts,",
"\"question\" def test_answerUser(answerUser, question_q, user_A): assert answerUser.correct == True assert answerUser.question.get(pk=question_q.id) == question_q",
"return QuestionScore.objects.create( question=question_q, statistics=stats_s, score=5 ) @pytest.fixture def themeScore_ts(db, stats_s, quiz_q): return ThemeScore.objects.create(",
"questionScore_qs.statistics == stats_s assert questionScore_qs.score == 5 def test_themeScore(themeScore_ts, stats_s, quiz_q): assert themeScore_ts.theme",
"assert sub_category_n.sub_category == \"n\" assert sub_category_n.category == category_m assert isinstance(sub_category_n, SubCategory) assert str(sub_category_n)",
"def question_q(db, quiz_q): return Question.objects.create( quiz=quiz_q, difficulty=1, order=1, figure=None, content=\"question\", explanation=None, theme1=\"t1\", theme2=\"t2\",",
"== \"question\" def test_answerUser(answerUser, question_q, user_A): assert answerUser.correct == True assert answerUser.question.get(pk=question_q.id) ==",
"def test_grade(grade_g, stats_s): assert grade_g.grade == 5 assert grade_g.number == 10 assert grade_g.statistics",
"question_q.theme2 == \"t2\" assert question_q.theme3 == \"t3\" assert str(question_q) == \"question\" def test_answerUser(answerUser,",
"quiz_q assert question_q.difficulty == 1 assert question_q.order == 1 assert question_q.figure == None",
"assert question_q.content == \"question\" assert question_q.explanation == None assert question_q.theme1 == \"t1\" assert",
"Grade.objects.create(grade=5, number=10, statistics=stats_s) @pytest.fixture def questionScore_qs(db, stats_s, question_q): return QuestionScore.objects.create( question=question_q, statistics=stats_s, score=5",
"assert questionScore_qs.question == question_q assert questionScore_qs.statistics == stats_s assert questionScore_qs.score == 5 def",
"easy=5, medium=5, difficult=5 ) @pytest.fixture def grade_g(db, stats_s): return Grade.objects.create(grade=5, number=10, statistics=stats_s) @pytest.fixture",
"stats_s.number_participants == 10 assert stats_s.mean == 15 assert stats_s.easy == 5 assert stats_s.medium",
"assert quiz_q.sub_category == sub_category_n assert isinstance(quiz_q.created, datetime.datetime) assert quiz_q.created.year == date.year assert quiz_q.created.month",
"date.month assert quiz_q.created.day == date.day assert quiz_q.random_order == False assert quiz_q.difficulty == 1",
"assert stats_s.difficult == 5 def test_grade(grade_g, stats_s): assert grade_g.grade == 5 assert grade_g.number",
"15 assert stats_s.easy == 5 assert stats_s.medium == 5 assert stats_s.difficult == 5",
"a = AnswerUser.objects.create(correct=True) a.save() a.question.add(question_q) a.user.add(user_A) return a @pytest.fixture def stats_s(db, quiz_q): return",
"question_q, questionScore_qs): assert questionScore_qs.question == question_q assert questionScore_qs.statistics == stats_s assert questionScore_qs.score ==",
"== 1 assert str(quiz_q) == \"title\" def test_question(quiz_q, question_q): assert question_q.quiz == quiz_q",
"assert quiz_q.description == \"Long description\" assert quiz_q.creator == user_A assert quiz_q.category == category_m",
"quiz_q.title == \"title\" assert quiz_q.description == \"Long description\" assert quiz_q.creator == user_A assert",
"content=\"question\", explanation=None, theme1=\"t1\", theme2=\"t2\", theme3=\"t3\", ) @pytest.fixture def answerUser(db, question_q, user_A): a =",
"sub_category_n assert isinstance(quiz_q.created, datetime.datetime) assert quiz_q.created.year == date.year assert quiz_q.created.month == date.month assert",
"question_q.theme1 == \"t1\" assert question_q.theme2 == \"t2\" assert question_q.theme3 == \"t3\" assert str(question_q)",
"def answerUser(db, question_q, user_A): a = AnswerUser.objects.create(correct=True) a.save() a.question.add(question_q) a.user.add(user_A) return a @pytest.fixture",
"question_q.order == 1 assert question_q.figure == None assert question_q.content == \"question\" assert question_q.explanation",
"user_A def test_statisc(stats_s, quiz_q): assert stats_s.quiz == quiz_q assert stats_s.number_participants == 10 assert",
"grade_g.grade == 5 assert grade_g.number == 10 assert grade_g.statistics == stats_s def test_questionScore(stats_s,",
"== question_q assert answerUser.user.get(pk=user_A.id) == user_A def test_statisc(stats_s, quiz_q): assert stats_s.quiz == quiz_q"
] |
[
"2 de calcular a hipotenusa (com o módulo math) import math co =",
"do cateto adjacente: ')) h = math.hypot(co, ca) print(f'a hipotenusa equivale a {h:.2f}')",
"= float(input('comprimento do cateto adjacente: ')) h = (co ** 2) + (ca",
"ca = float(input('comprimento do cateto adjacente: ')) h = math.hypot(co, ca) print(f'a hipotenusa",
"cateto oposto: ')) ca = float(input('comprimento do cateto adjacente: ')) h = (co",
"2) + (ca ** 2) ** (1/2) print(f'a hipotenusa equivale a {h:.2f}') '''",
"')) ca = float(input('comprimento do cateto adjacente: ')) h = (co ** 2)",
"import math co = float(input('comprimento do cateto oposto: ')) ca = float(input('comprimento do",
"o módulo math) co = float(input('comprimento do cateto oposto: ')) ca = float(input('comprimento",
"** (1/2) print(f'a hipotenusa equivale a {h:.2f}') ''' # Método 2 de calcular",
"print(f'a hipotenusa equivale a {h:.2f}') ''' # Método 2 de calcular a hipotenusa",
"(com o módulo math) import math co = float(input('comprimento do cateto oposto: '))",
"módulo math) import math co = float(input('comprimento do cateto oposto: ')) ca =",
"do cateto oposto: ')) ca = float(input('comprimento do cateto adjacente: ')) h =",
"(1/2) print(f'a hipotenusa equivale a {h:.2f}') ''' # Método 2 de calcular a",
"{h:.2f}') ''' # Método 2 de calcular a hipotenusa (com o módulo math)",
"+ (ca ** 2) ** (1/2) print(f'a hipotenusa equivale a {h:.2f}') ''' #",
"a {h:.2f}') ''' # Método 2 de calcular a hipotenusa (com o módulo",
"módulo math) co = float(input('comprimento do cateto oposto: ')) ca = float(input('comprimento do",
"** 2) + (ca ** 2) ** (1/2) print(f'a hipotenusa equivale a {h:.2f}')",
"(co ** 2) + (ca ** 2) ** (1/2) print(f'a hipotenusa equivale a",
"= float(input('comprimento do cateto adjacente: ')) h = math.hypot(co, ca) print(f'a hipotenusa equivale",
"= (co ** 2) + (ca ** 2) ** (1/2) print(f'a hipotenusa equivale",
"do cateto adjacente: ')) h = (co ** 2) + (ca ** 2)",
"float(input('comprimento do cateto adjacente: ')) h = (co ** 2) + (ca **",
"h = (co ** 2) + (ca ** 2) ** (1/2) print(f'a hipotenusa",
"math) import math co = float(input('comprimento do cateto oposto: ')) ca = float(input('comprimento",
"a hipotenusa (sem o módulo math) co = float(input('comprimento do cateto oposto: '))",
"calcular a hipotenusa (com o módulo math) import math co = float(input('comprimento do",
"adjacente: ')) h = (co ** 2) + (ca ** 2) ** (1/2)",
"cateto adjacente: ')) h = (co ** 2) + (ca ** 2) **",
"math) co = float(input('comprimento do cateto oposto: ')) ca = float(input('comprimento do cateto",
"''' #Método 1 de calcular a hipotenusa (sem o módulo math) co =",
"#Método 1 de calcular a hipotenusa (sem o módulo math) co = float(input('comprimento",
"cateto oposto: ')) ca = float(input('comprimento do cateto adjacente: ')) h = math.hypot(co,",
"hipotenusa (com o módulo math) import math co = float(input('comprimento do cateto oposto:",
"''' # Método 2 de calcular a hipotenusa (com o módulo math) import",
"(ca ** 2) ** (1/2) print(f'a hipotenusa equivale a {h:.2f}') ''' # Método",
"o módulo math) import math co = float(input('comprimento do cateto oposto: ')) ca",
"calcular a hipotenusa (sem o módulo math) co = float(input('comprimento do cateto oposto:",
"Método 2 de calcular a hipotenusa (com o módulo math) import math co",
"float(input('comprimento do cateto adjacente: ')) h = math.hypot(co, ca) print(f'a hipotenusa equivale a",
"float(input('comprimento do cateto oposto: ')) ca = float(input('comprimento do cateto adjacente: ')) h",
"oposto: ')) ca = float(input('comprimento do cateto adjacente: ')) h = (co **",
"')) ca = float(input('comprimento do cateto adjacente: ')) h = math.hypot(co, ca) print(f'a",
"equivale a {h:.2f}') ''' # Método 2 de calcular a hipotenusa (com o",
"(sem o módulo math) co = float(input('comprimento do cateto oposto: ')) ca =",
"a hipotenusa (com o módulo math) import math co = float(input('comprimento do cateto",
"math co = float(input('comprimento do cateto oposto: ')) ca = float(input('comprimento do cateto",
"ca = float(input('comprimento do cateto adjacente: ')) h = (co ** 2) +",
"de calcular a hipotenusa (com o módulo math) import math co = float(input('comprimento",
"2) ** (1/2) print(f'a hipotenusa equivale a {h:.2f}') ''' # Método 2 de",
"hipotenusa (sem o módulo math) co = float(input('comprimento do cateto oposto: ')) ca",
"** 2) ** (1/2) print(f'a hipotenusa equivale a {h:.2f}') ''' # Método 2",
"oposto: ')) ca = float(input('comprimento do cateto adjacente: ')) h = math.hypot(co, ca)",
"de calcular a hipotenusa (sem o módulo math) co = float(input('comprimento do cateto",
"co = float(input('comprimento do cateto oposto: ')) ca = float(input('comprimento do cateto adjacente:",
"= float(input('comprimento do cateto oposto: ')) ca = float(input('comprimento do cateto adjacente: '))",
"# Método 2 de calcular a hipotenusa (com o módulo math) import math",
"hipotenusa equivale a {h:.2f}') ''' # Método 2 de calcular a hipotenusa (com",
"1 de calcular a hipotenusa (sem o módulo math) co = float(input('comprimento do",
"')) h = (co ** 2) + (ca ** 2) ** (1/2) print(f'a"
] |
[
"return id.split(\"-\")[0] else: return id.split(\".\")[0] def convert_date_to_ISO(datestring): return \"{}-{}-{}T{}:{}:{}Z\".format( datestring[:4], datestring[4:6], datestring[6:8], datestring[8:10],",
"schema: { \"id\":int, \"ref_no\": int, \"date\": string, \"account\": int, account code, \"isincome\": bool,",
"int(trans[3].text), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CASHREWARDS_CC\", \"payee\": trans[6].text, \"notes\": { \"bank\": \"\", \"personal\": \"\"",
"}) return transactions def fetch_acct_info(): print(\"Updating Account Information\") accounts = [] parser =",
"import_acct_statement(): print(\"Importing BOFA Account Statement\") parser = OFXTree() parser.parse(\"./data/stmt.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] transactions",
"trans in transactions_root[2:]: transactions.append({ \"id\": 0, \"ref_no\": int(convert_acct_fitid(trans[3].text)), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CHECKING\", \"payee\":",
"in transactions_root[2:]: transactions.append({ \"id\": 0, \"ref_no\": int(convert_acct_fitid(trans[3].text)), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CHECKING\", \"payee\": trans[4].text,",
"ofxtools.Parser import OFXTree \"\"\" statement schema: { \"id\":int, \"ref_no\": int, \"date\": string, \"account\":",
"\"categories\": [], \"linked_transaction\": int, \"countinbudget\": bool } ] } \"\"\" def fetch_all_transactions(): cc",
"parser.find(\".//BANKTRANLIST\")[:] id = 0 transactions = [] for trans in transactions_root[2:]: transactions.append({ \"id\":",
"import_cc_statement(): print(\"Importing BOFA CC Statement\") parser = OFXTree() parser.parse(\"./data/currentTransaction_1626.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] id",
"parser.parse(\"./data/stmt.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] transactions = [] for trans in transactions_root[2:]: transactions.append({ \"id\":",
"OFXTree() parser.parse(\"./data/stmt.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] transactions = [] for trans in transactions_root[2:]: transactions.append({",
"Account Information\") accounts = [] parser = OFXTree() parser.parse('./data/currentTransaction_1626.qfx') accounts.append({ \"name\": \"BOFA_CASHREWARDS_CC\", \"id\":",
"\"notes\": { \"bank\": string, \"personal\": string }, \"categories\": [strings], \"totalamount\": float, \"splits\":[ {",
"\"splits\": [] }) return transactions def import_cc_statement(): print(\"Importing BOFA CC Statement\") parser =",
"convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) parser.parse('./data/stmt.qfx') accounts.append({ \"name\": \"BOFA_CHECKING\", \"id\": 0, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) })",
"\"name\": \"BOFA_CASHREWARDS_CC\", \"id\": 2, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) parser.parse('./data/stmt.qfx') accounts.append({ \"name\": \"BOFA_CHECKING\",",
"= parser.find(\".//BANKTRANLIST\")[:] id = 0 transactions = [] for trans in transactions_root[2:]: transactions.append({",
"string, \"notes\": { \"bank\": string, \"personal\": string }, \"categories\": [strings], \"totalamount\": float, \"splits\":[",
"trans[4].text, \"notes\": { \"bank\": \"\", \"personal\": \"\" }, \"categories\": [], \"totalamount\": float(trans[2].text), \"splits\":",
"convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) return accounts if __name__ == '__main__': accts = fetch_acct_info() for acct",
"0 transactions = [] for trans in transactions_root[2:]: transactions.append({ \"id\": id, \"ref_no\": int(trans[3].text),",
"datestring[10:12], datestring[12:14], ) def import_acct_statement(): print(\"Importing BOFA Account Statement\") parser = OFXTree() parser.parse(\"./data/stmt.qfx\")",
"\"personal\": \"\" }, \"categories\": [], \"totalamount\": float(trans[2].text), \"splits\": [] }) return transactions def",
"{ \"bank\": string, \"personal\": string }, \"categories\": [strings], \"totalamount\": float, \"splits\":[ { \"amount\":",
"id = 0 transactions = [] for trans in transactions_root[2:]: transactions.append({ \"id\": id,",
"convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CASHREWARDS_CC\", \"payee\": trans[6].text, \"notes\": { \"bank\": \"\", \"personal\": \"\" }, \"categories\":",
"string, \"personal\": string }, \"categories\": [strings], \"totalamount\": float, \"splits\":[ { \"amount\": float, \"categories\":",
"transactions def import_cc_statement(): print(\"Importing BOFA CC Statement\") parser = OFXTree() parser.parse(\"./data/currentTransaction_1626.qfx\") transactions_root =",
"2, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) parser.parse('./data/stmt.qfx') accounts.append({ \"name\": \"BOFA_CHECKING\", \"id\": 0, \"balance\":",
"\"notes\": { \"bank\": \"\", \"personal\": \"\" }, \"categories\": [], \"totalamount\": float(trans[2].text), \"splits\": []",
"parser.find(\".//BANKTRANLIST\")[:] transactions = [] for trans in transactions_root[2:]: transactions.append({ \"id\": 0, \"ref_no\": int(convert_acct_fitid(trans[3].text)),",
"\"countinbudget\": bool } ] } \"\"\" def fetch_all_transactions(): cc = import_cc_statement() acct =",
"bool, \"countinbudget\": bool, \"payee\": string, \"notes\": { \"bank\": string, \"personal\": string }, \"categories\":",
"bool, \"payee\": string, \"notes\": { \"bank\": string, \"personal\": string }, \"categories\": [strings], \"totalamount\":",
"\"countinbudget\": bool, \"payee\": string, \"notes\": { \"bank\": string, \"personal\": string }, \"categories\": [strings],",
"{ \"amount\": float, \"categories\": [], \"linked_transaction\": int, \"countinbudget\": bool } ] } \"\"\"",
"\"totalamount\": float(trans[2].text), \"splits\": [] }) return transactions def import_cc_statement(): print(\"Importing BOFA CC Statement\")",
"\"splits\": [] }) return transactions def fetch_acct_info(): print(\"Updating Account Information\") accounts = []",
"transactions_root = parser.find(\".//BANKTRANLIST\")[:] transactions = [] for trans in transactions_root[2:]: transactions.append({ \"id\": 0,",
"\"{}-{}-{}T{}:{}:{}Z\".format( datestring[:4], datestring[4:6], datestring[6:8], datestring[8:10], datestring[10:12], datestring[12:14], ) def import_acct_statement(): print(\"Importing BOFA Account",
"datestring[8:10], datestring[10:12], datestring[12:14], ) def import_acct_statement(): print(\"Importing BOFA Account Statement\") parser = OFXTree()",
"\"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CHECKING\", \"payee\": trans[4].text, \"notes\": { \"bank\": \"\", \"personal\": \"\" },",
"= [] for trans in transactions_root[2:]: transactions.append({ \"id\": id, \"ref_no\": int(trans[3].text), \"date\": convert_date_to_ISO(trans[1].text),",
"\"categories\": [strings], \"totalamount\": float, \"splits\":[ { \"amount\": float, \"categories\": [], \"linked_transaction\": int, \"countinbudget\":",
"fetch_all_transactions(): cc = import_cc_statement() acct = import_acct_statement() return cc + acct def convert_acct_fitid(id):",
"\"BOFA_CASHREWARDS_CC\", \"id\": 2, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) parser.parse('./data/stmt.qfx') accounts.append({ \"name\": \"BOFA_CHECKING\", \"id\":",
"transactions def fetch_acct_info(): print(\"Updating Account Information\") accounts = [] parser = OFXTree() parser.parse('./data/currentTransaction_1626.qfx')",
"[strings], \"totalamount\": float, \"splits\":[ { \"amount\": float, \"categories\": [], \"linked_transaction\": int, \"countinbudget\": bool",
"float, \"splits\":[ { \"amount\": float, \"categories\": [], \"linked_transaction\": int, \"countinbudget\": bool } ]",
"convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CHECKING\", \"payee\": trans[4].text, \"notes\": { \"bank\": \"\", \"personal\": \"\" }, \"categories\":",
"id.split(\"-\")[0] else: return id.split(\".\")[0] def convert_date_to_ISO(datestring): return \"{}-{}-{}T{}:{}:{}Z\".format( datestring[:4], datestring[4:6], datestring[6:8], datestring[8:10], datestring[10:12],",
"float(trans[2].text), \"splits\": [] }) return transactions def import_cc_statement(): print(\"Importing BOFA CC Statement\") parser",
"parser.parse('./data/currentTransaction_1626.qfx') accounts.append({ \"name\": \"BOFA_CASHREWARDS_CC\", \"id\": 2, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) parser.parse('./data/stmt.qfx') accounts.append({",
"\"date\": string, \"account\": int, account code, \"isincome\": bool, \"countinbudget\": bool, \"payee\": string, \"notes\":",
"trans in transactions_root[2:]: transactions.append({ \"id\": id, \"ref_no\": int(trans[3].text), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CASHREWARDS_CC\", \"payee\":",
"parser.parse('./data/stmt.qfx') accounts.append({ \"name\": \"BOFA_CHECKING\", \"id\": 0, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) return accounts",
"parser = OFXTree() parser.parse(\"./data/currentTransaction_1626.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] id = 0 transactions = []",
"\"categories\": [], \"totalamount\": float(trans[2].text), \"splits\": [] }) return transactions def import_cc_statement(): print(\"Importing BOFA",
"[], \"linked_transaction\": int, \"countinbudget\": bool } ] } \"\"\" def fetch_all_transactions(): cc =",
"transactions = [] for trans in transactions_root[2:]: transactions.append({ \"id\": 0, \"ref_no\": int(convert_acct_fitid(trans[3].text)), \"date\":",
"}) return accounts if __name__ == '__main__': accts = fetch_acct_info() for acct in",
"account code, \"isincome\": bool, \"countinbudget\": bool, \"payee\": string, \"notes\": { \"bank\": string, \"personal\":",
"\"ref_no\": int(trans[3].text), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CASHREWARDS_CC\", \"payee\": trans[6].text, \"notes\": { \"bank\": \"\", \"personal\":",
"import json from ofxtools.Parser import OFXTree \"\"\" statement schema: { \"id\":int, \"ref_no\": int,",
"= import_acct_statement() return cc + acct def convert_acct_fitid(id): if \"-\" in id: return",
"\"ref_no\": int(convert_acct_fitid(trans[3].text)), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CHECKING\", \"payee\": trans[4].text, \"notes\": { \"bank\": \"\", \"personal\":",
"\"BOFA_CASHREWARDS_CC\", \"payee\": trans[6].text, \"notes\": { \"bank\": \"\", \"personal\": \"\" }, \"categories\": [], \"totalamount\":",
"\"name\": \"BOFA_CHECKING\", \"id\": 0, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) return accounts if __name__",
"convert_date_to_ISO(datestring): return \"{}-{}-{}T{}:{}:{}Z\".format( datestring[:4], datestring[4:6], datestring[6:8], datestring[8:10], datestring[10:12], datestring[12:14], ) def import_acct_statement(): print(\"Importing",
"} \"\"\" def fetch_all_transactions(): cc = import_cc_statement() acct = import_acct_statement() return cc +",
"def convert_acct_fitid(id): if \"-\" in id: return id.split(\"-\")[0] else: return id.split(\".\")[0] def convert_date_to_ISO(datestring):",
"= [] parser = OFXTree() parser.parse('./data/currentTransaction_1626.qfx') accounts.append({ \"name\": \"BOFA_CASHREWARDS_CC\", \"id\": 2, \"balance\": parser.find(\".//BALAMT\").text,",
"import OFXTree \"\"\" statement schema: { \"id\":int, \"ref_no\": int, \"date\": string, \"account\": int,",
"accounts = [] parser = OFXTree() parser.parse('./data/currentTransaction_1626.qfx') accounts.append({ \"name\": \"BOFA_CASHREWARDS_CC\", \"id\": 2, \"balance\":",
"}) parser.parse('./data/stmt.qfx') accounts.append({ \"name\": \"BOFA_CHECKING\", \"id\": 0, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) return",
"convert_acct_fitid(id): if \"-\" in id: return id.split(\"-\")[0] else: return id.split(\".\")[0] def convert_date_to_ISO(datestring): return",
"for trans in transactions_root[2:]: transactions.append({ \"id\": id, \"ref_no\": int(trans[3].text), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CASHREWARDS_CC\",",
"string, \"account\": int, account code, \"isincome\": bool, \"countinbudget\": bool, \"payee\": string, \"notes\": {",
"\"account\": int, account code, \"isincome\": bool, \"countinbudget\": bool, \"payee\": string, \"notes\": { \"bank\":",
"def convert_date_to_ISO(datestring): return \"{}-{}-{}T{}:{}:{}Z\".format( datestring[:4], datestring[4:6], datestring[6:8], datestring[8:10], datestring[10:12], datestring[12:14], ) def import_acct_statement():",
"trans[6].text, \"notes\": { \"bank\": \"\", \"personal\": \"\" }, \"categories\": [], \"totalamount\": float(trans[2].text), \"splits\":",
"from ofxtools.Parser import OFXTree \"\"\" statement schema: { \"id\":int, \"ref_no\": int, \"date\": string,",
"import csv import json from ofxtools.Parser import OFXTree \"\"\" statement schema: { \"id\":int,",
"in id: return id.split(\"-\")[0] else: return id.split(\".\")[0] def convert_date_to_ISO(datestring): return \"{}-{}-{}T{}:{}:{}Z\".format( datestring[:4], datestring[4:6],",
"\"id\": 2, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) parser.parse('./data/stmt.qfx') accounts.append({ \"name\": \"BOFA_CHECKING\", \"id\": 0,",
"\"totalamount\": float, \"splits\":[ { \"amount\": float, \"categories\": [], \"linked_transaction\": int, \"countinbudget\": bool }",
"id: return id.split(\"-\")[0] else: return id.split(\".\")[0] def convert_date_to_ISO(datestring): return \"{}-{}-{}T{}:{}:{}Z\".format( datestring[:4], datestring[4:6], datestring[6:8],",
"] } \"\"\" def fetch_all_transactions(): cc = import_cc_statement() acct = import_acct_statement() return cc",
"def fetch_all_transactions(): cc = import_cc_statement() acct = import_acct_statement() return cc + acct def",
"json from ofxtools.Parser import OFXTree \"\"\" statement schema: { \"id\":int, \"ref_no\": int, \"date\":",
"parser = OFXTree() parser.parse(\"./data/stmt.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] transactions = [] for trans in",
"float(trans[2].text), \"splits\": [] }) return transactions def fetch_acct_info(): print(\"Updating Account Information\") accounts =",
"transactions.append({ \"id\": 0, \"ref_no\": int(convert_acct_fitid(trans[3].text)), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CHECKING\", \"payee\": trans[4].text, \"notes\": {",
"= import_cc_statement() acct = import_acct_statement() return cc + acct def convert_acct_fitid(id): if \"-\"",
"= OFXTree() parser.parse('./data/currentTransaction_1626.qfx') accounts.append({ \"name\": \"BOFA_CASHREWARDS_CC\", \"id\": 2, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) })",
"OFXTree() parser.parse('./data/currentTransaction_1626.qfx') accounts.append({ \"name\": \"BOFA_CASHREWARDS_CC\", \"id\": 2, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) parser.parse('./data/stmt.qfx')",
"return transactions def import_cc_statement(): print(\"Importing BOFA CC Statement\") parser = OFXTree() parser.parse(\"./data/currentTransaction_1626.qfx\") transactions_root",
"\"categories\": [], \"totalamount\": float(trans[2].text), \"splits\": [] }) return transactions def fetch_acct_info(): print(\"Updating Account",
"print(\"Importing BOFA CC Statement\") parser = OFXTree() parser.parse(\"./data/currentTransaction_1626.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] id =",
"= OFXTree() parser.parse(\"./data/stmt.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] transactions = [] for trans in transactions_root[2:]:",
"\"payee\": trans[6].text, \"notes\": { \"bank\": \"\", \"personal\": \"\" }, \"categories\": [], \"totalamount\": float(trans[2].text),",
"datestring[:4], datestring[4:6], datestring[6:8], datestring[8:10], datestring[10:12], datestring[12:14], ) def import_acct_statement(): print(\"Importing BOFA Account Statement\")",
"code, \"isincome\": bool, \"countinbudget\": bool, \"payee\": string, \"notes\": { \"bank\": string, \"personal\": string",
"CC Statement\") parser = OFXTree() parser.parse(\"./data/currentTransaction_1626.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] id = 0 transactions",
"statement schema: { \"id\":int, \"ref_no\": int, \"date\": string, \"account\": int, account code, \"isincome\":",
"cc + acct def convert_acct_fitid(id): if \"-\" in id: return id.split(\"-\")[0] else: return",
"import_acct_statement() return cc + acct def convert_acct_fitid(id): if \"-\" in id: return id.split(\"-\")[0]",
"\"payee\": string, \"notes\": { \"bank\": string, \"personal\": string }, \"categories\": [strings], \"totalamount\": float,",
"cc = import_cc_statement() acct = import_acct_statement() return cc + acct def convert_acct_fitid(id): if",
"[] for trans in transactions_root[2:]: transactions.append({ \"id\": id, \"ref_no\": int(trans[3].text), \"date\": convert_date_to_ISO(trans[1].text), \"account\":",
"transactions.append({ \"id\": id, \"ref_no\": int(trans[3].text), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CASHREWARDS_CC\", \"payee\": trans[6].text, \"notes\": {",
"int, \"date\": string, \"account\": int, account code, \"isincome\": bool, \"countinbudget\": bool, \"payee\": string,",
"\"id\": 0, \"ref_no\": int(convert_acct_fitid(trans[3].text)), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CHECKING\", \"payee\": trans[4].text, \"notes\": { \"bank\":",
"BOFA Account Statement\") parser = OFXTree() parser.parse(\"./data/stmt.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] transactions = []",
"BOFA CC Statement\") parser = OFXTree() parser.parse(\"./data/currentTransaction_1626.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] id = 0",
"[] }) return transactions def fetch_acct_info(): print(\"Updating Account Information\") accounts = [] parser",
"+ acct def convert_acct_fitid(id): if \"-\" in id: return id.split(\"-\")[0] else: return id.split(\".\")[0]",
"accounts.append({ \"name\": \"BOFA_CHECKING\", \"id\": 0, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) return accounts if",
"\"\" }, \"categories\": [], \"totalamount\": float(trans[2].text), \"splits\": [] }) return transactions def fetch_acct_info():",
"\"\" }, \"categories\": [], \"totalamount\": float(trans[2].text), \"splits\": [] }) return transactions def import_cc_statement():",
"acct = import_acct_statement() return cc + acct def convert_acct_fitid(id): if \"-\" in id:",
"string }, \"categories\": [strings], \"totalamount\": float, \"splits\":[ { \"amount\": float, \"categories\": [], \"linked_transaction\":",
"= 0 transactions = [] for trans in transactions_root[2:]: transactions.append({ \"id\": id, \"ref_no\":",
"transactions = [] for trans in transactions_root[2:]: transactions.append({ \"id\": id, \"ref_no\": int(trans[3].text), \"date\":",
"\"account\": \"BOFA_CASHREWARDS_CC\", \"payee\": trans[6].text, \"notes\": { \"bank\": \"\", \"personal\": \"\" }, \"categories\": [],",
"transactions_root = parser.find(\".//BANKTRANLIST\")[:] id = 0 transactions = [] for trans in transactions_root[2:]:",
"\"personal\": string }, \"categories\": [strings], \"totalamount\": float, \"splits\":[ { \"amount\": float, \"categories\": [],",
"transactions_root[2:]: transactions.append({ \"id\": id, \"ref_no\": int(trans[3].text), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CASHREWARDS_CC\", \"payee\": trans[6].text, \"notes\":",
"parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) return accounts if __name__ == '__main__': accts = fetch_acct_info()",
"\"payee\": trans[4].text, \"notes\": { \"bank\": \"\", \"personal\": \"\" }, \"categories\": [], \"totalamount\": float(trans[2].text),",
"\"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) return accounts if __name__ == '__main__': accts =",
"return id.split(\".\")[0] def convert_date_to_ISO(datestring): return \"{}-{}-{}T{}:{}:{}Z\".format( datestring[:4], datestring[4:6], datestring[6:8], datestring[8:10], datestring[10:12], datestring[12:14], )",
"def fetch_acct_info(): print(\"Updating Account Information\") accounts = [] parser = OFXTree() parser.parse('./data/currentTransaction_1626.qfx') accounts.append({",
"datestring[4:6], datestring[6:8], datestring[8:10], datestring[10:12], datestring[12:14], ) def import_acct_statement(): print(\"Importing BOFA Account Statement\") parser",
"\"id\":int, \"ref_no\": int, \"date\": string, \"account\": int, account code, \"isincome\": bool, \"countinbudget\": bool,",
"int, account code, \"isincome\": bool, \"countinbudget\": bool, \"payee\": string, \"notes\": { \"bank\": string,",
"return accounts if __name__ == '__main__': accts = fetch_acct_info() for acct in accts:",
"OFXTree() parser.parse(\"./data/currentTransaction_1626.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] id = 0 transactions = [] for trans",
"return cc + acct def convert_acct_fitid(id): if \"-\" in id: return id.split(\"-\")[0] else:",
"}, \"categories\": [strings], \"totalamount\": float, \"splits\":[ { \"amount\": float, \"categories\": [], \"linked_transaction\": int,",
"parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) parser.parse('./data/stmt.qfx') accounts.append({ \"name\": \"BOFA_CHECKING\", \"id\": 0, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\":",
"\"id\": id, \"ref_no\": int(trans[3].text), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CASHREWARDS_CC\", \"payee\": trans[6].text, \"notes\": { \"bank\":",
"print(\"Importing BOFA Account Statement\") parser = OFXTree() parser.parse(\"./data/stmt.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] transactions =",
"else: return id.split(\".\")[0] def convert_date_to_ISO(datestring): return \"{}-{}-{}T{}:{}:{}Z\".format( datestring[:4], datestring[4:6], datestring[6:8], datestring[8:10], datestring[10:12], datestring[12:14],",
"id, \"ref_no\": int(trans[3].text), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CASHREWARDS_CC\", \"payee\": trans[6].text, \"notes\": { \"bank\": \"\",",
") def import_acct_statement(): print(\"Importing BOFA Account Statement\") parser = OFXTree() parser.parse(\"./data/stmt.qfx\") transactions_root =",
"\"-\" in id: return id.split(\"-\")[0] else: return id.split(\".\")[0] def convert_date_to_ISO(datestring): return \"{}-{}-{}T{}:{}:{}Z\".format( datestring[:4],",
"\"bank\": \"\", \"personal\": \"\" }, \"categories\": [], \"totalamount\": float(trans[2].text), \"splits\": [] }) return",
"id.split(\".\")[0] def convert_date_to_ISO(datestring): return \"{}-{}-{}T{}:{}:{}Z\".format( datestring[:4], datestring[4:6], datestring[6:8], datestring[8:10], datestring[10:12], datestring[12:14], ) def",
"int(convert_acct_fitid(trans[3].text)), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CHECKING\", \"payee\": trans[4].text, \"notes\": { \"bank\": \"\", \"personal\": \"\"",
"accounts if __name__ == '__main__': accts = fetch_acct_info() for acct in accts: print(acct)",
"\"isincome\": bool, \"countinbudget\": bool, \"payee\": string, \"notes\": { \"bank\": string, \"personal\": string },",
"def import_acct_statement(): print(\"Importing BOFA Account Statement\") parser = OFXTree() parser.parse(\"./data/stmt.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:]",
"for trans in transactions_root[2:]: transactions.append({ \"id\": 0, \"ref_no\": int(convert_acct_fitid(trans[3].text)), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CHECKING\",",
"\"BOFA_CHECKING\", \"payee\": trans[4].text, \"notes\": { \"bank\": \"\", \"personal\": \"\" }, \"categories\": [], \"totalamount\":",
"{ \"id\":int, \"ref_no\": int, \"date\": string, \"account\": int, account code, \"isincome\": bool, \"countinbudget\":",
"\"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CASHREWARDS_CC\", \"payee\": trans[6].text, \"notes\": { \"bank\": \"\", \"personal\": \"\" },",
"float, \"categories\": [], \"linked_transaction\": int, \"countinbudget\": bool } ] } \"\"\" def fetch_all_transactions():",
"\"amount\": float, \"categories\": [], \"linked_transaction\": int, \"countinbudget\": bool } ] } \"\"\" def",
"}, \"categories\": [], \"totalamount\": float(trans[2].text), \"splits\": [] }) return transactions def import_cc_statement(): print(\"Importing",
"Information\") accounts = [] parser = OFXTree() parser.parse('./data/currentTransaction_1626.qfx') accounts.append({ \"name\": \"BOFA_CASHREWARDS_CC\", \"id\": 2,",
"transactions_root[2:]: transactions.append({ \"id\": 0, \"ref_no\": int(convert_acct_fitid(trans[3].text)), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CHECKING\", \"payee\": trans[4].text, \"notes\":",
"\"id\": 0, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) return accounts if __name__ == '__main__':",
"}) return transactions def import_cc_statement(): print(\"Importing BOFA CC Statement\") parser = OFXTree() parser.parse(\"./data/currentTransaction_1626.qfx\")",
"parser = OFXTree() parser.parse('./data/currentTransaction_1626.qfx') accounts.append({ \"name\": \"BOFA_CASHREWARDS_CC\", \"id\": 2, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text)",
"return transactions def fetch_acct_info(): print(\"Updating Account Information\") accounts = [] parser = OFXTree()",
"= OFXTree() parser.parse(\"./data/currentTransaction_1626.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] id = 0 transactions = [] for",
"\"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) parser.parse('./data/stmt.qfx') accounts.append({ \"name\": \"BOFA_CHECKING\", \"id\": 0, \"balance\": parser.find(\".//BALAMT\").text,",
"} ] } \"\"\" def fetch_all_transactions(): cc = import_cc_statement() acct = import_acct_statement() return",
"if \"-\" in id: return id.split(\"-\")[0] else: return id.split(\".\")[0] def convert_date_to_ISO(datestring): return \"{}-{}-{}T{}:{}:{}Z\".format(",
"Account Statement\") parser = OFXTree() parser.parse(\"./data/stmt.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] transactions = [] for",
"{ \"bank\": \"\", \"personal\": \"\" }, \"categories\": [], \"totalamount\": float(trans[2].text), \"splits\": [] })",
"[] }) return transactions def import_cc_statement(): print(\"Importing BOFA CC Statement\") parser = OFXTree()",
"OFXTree \"\"\" statement schema: { \"id\":int, \"ref_no\": int, \"date\": string, \"account\": int, account",
"\"bank\": string, \"personal\": string }, \"categories\": [strings], \"totalamount\": float, \"splits\":[ { \"amount\": float,",
"\"totalamount\": float(trans[2].text), \"splits\": [] }) return transactions def fetch_acct_info(): print(\"Updating Account Information\") accounts",
"fetch_acct_info(): print(\"Updating Account Information\") accounts = [] parser = OFXTree() parser.parse('./data/currentTransaction_1626.qfx') accounts.append({ \"name\":",
"import_cc_statement() acct = import_acct_statement() return cc + acct def convert_acct_fitid(id): if \"-\" in",
"Statement\") parser = OFXTree() parser.parse(\"./data/stmt.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] transactions = [] for trans",
"parser.parse(\"./data/currentTransaction_1626.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] id = 0 transactions = [] for trans in",
"accounts.append({ \"name\": \"BOFA_CASHREWARDS_CC\", \"id\": 2, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) parser.parse('./data/stmt.qfx') accounts.append({ \"name\":",
"\"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) return accounts if __name__ == '__main__': accts = fetch_acct_info() for",
"\"account\": \"BOFA_CHECKING\", \"payee\": trans[4].text, \"notes\": { \"bank\": \"\", \"personal\": \"\" }, \"categories\": [],",
"= [] for trans in transactions_root[2:]: transactions.append({ \"id\": 0, \"ref_no\": int(convert_acct_fitid(trans[3].text)), \"date\": convert_date_to_ISO(trans[1].text),",
"datestring[6:8], datestring[8:10], datestring[10:12], datestring[12:14], ) def import_acct_statement(): print(\"Importing BOFA Account Statement\") parser =",
"int, \"countinbudget\": bool } ] } \"\"\" def fetch_all_transactions(): cc = import_cc_statement() acct",
"csv import json from ofxtools.Parser import OFXTree \"\"\" statement schema: { \"id\":int, \"ref_no\":",
"[], \"totalamount\": float(trans[2].text), \"splits\": [] }) return transactions def import_cc_statement(): print(\"Importing BOFA CC",
"0, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) return accounts if __name__ == '__main__': accts",
"[] for trans in transactions_root[2:]: transactions.append({ \"id\": 0, \"ref_no\": int(convert_acct_fitid(trans[3].text)), \"date\": convert_date_to_ISO(trans[1].text), \"account\":",
"\"splits\":[ { \"amount\": float, \"categories\": [], \"linked_transaction\": int, \"countinbudget\": bool } ] }",
"\"ref_no\": int, \"date\": string, \"account\": int, account code, \"isincome\": bool, \"countinbudget\": bool, \"payee\":",
"def import_cc_statement(): print(\"Importing BOFA CC Statement\") parser = OFXTree() parser.parse(\"./data/currentTransaction_1626.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:]",
"\"linked_transaction\": int, \"countinbudget\": bool } ] } \"\"\" def fetch_all_transactions(): cc = import_cc_statement()",
"return \"{}-{}-{}T{}:{}:{}Z\".format( datestring[:4], datestring[4:6], datestring[6:8], datestring[8:10], datestring[10:12], datestring[12:14], ) def import_acct_statement(): print(\"Importing BOFA",
"acct def convert_acct_fitid(id): if \"-\" in id: return id.split(\"-\")[0] else: return id.split(\".\")[0] def",
"\"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) parser.parse('./data/stmt.qfx') accounts.append({ \"name\": \"BOFA_CHECKING\", \"id\": 0, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text)",
"\"\"\" statement schema: { \"id\":int, \"ref_no\": int, \"date\": string, \"account\": int, account code,",
"}, \"categories\": [], \"totalamount\": float(trans[2].text), \"splits\": [] }) return transactions def fetch_acct_info(): print(\"Updating",
"bool } ] } \"\"\" def fetch_all_transactions(): cc = import_cc_statement() acct = import_acct_statement()",
"[] parser = OFXTree() parser.parse('./data/currentTransaction_1626.qfx') accounts.append({ \"name\": \"BOFA_CASHREWARDS_CC\", \"id\": 2, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\":",
"in transactions_root[2:]: transactions.append({ \"id\": id, \"ref_no\": int(trans[3].text), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CASHREWARDS_CC\", \"payee\": trans[6].text,",
"\"\"\" def fetch_all_transactions(): cc = import_cc_statement() acct = import_acct_statement() return cc + acct",
"Statement\") parser = OFXTree() parser.parse(\"./data/currentTransaction_1626.qfx\") transactions_root = parser.find(\".//BANKTRANLIST\")[:] id = 0 transactions =",
"0, \"ref_no\": int(convert_acct_fitid(trans[3].text)), \"date\": convert_date_to_ISO(trans[1].text), \"account\": \"BOFA_CHECKING\", \"payee\": trans[4].text, \"notes\": { \"bank\": \"\",",
"\"BOFA_CHECKING\", \"id\": 0, \"balance\": parser.find(\".//BALAMT\").text, \"last_updated\": convert_date_to_ISO(parser.find(\".//DTASOF\").text) }) return accounts if __name__ ==",
"\"\", \"personal\": \"\" }, \"categories\": [], \"totalamount\": float(trans[2].text), \"splits\": [] }) return transactions",
"print(\"Updating Account Information\") accounts = [] parser = OFXTree() parser.parse('./data/currentTransaction_1626.qfx') accounts.append({ \"name\": \"BOFA_CASHREWARDS_CC\",",
"[], \"totalamount\": float(trans[2].text), \"splits\": [] }) return transactions def fetch_acct_info(): print(\"Updating Account Information\")",
"datestring[12:14], ) def import_acct_statement(): print(\"Importing BOFA Account Statement\") parser = OFXTree() parser.parse(\"./data/stmt.qfx\") transactions_root",
"= parser.find(\".//BANKTRANLIST\")[:] transactions = [] for trans in transactions_root[2:]: transactions.append({ \"id\": 0, \"ref_no\":"
] |
[
"int, positional_encoding=None, embed_dim=256, num_heads=8, ff_dim=1024, dropout_rate=0.1, is_encoder_decoder=False, ): super(GreatTransformerConfig, self).__init__() self.num_layers = num_layers",
"def __init__( self, num_layers: int, positional_encoding=None, embed_dim=256, num_heads=8, ff_dim=1024, dropout_rate=0.1, is_encoder_decoder=False, ): super(GreatTransformerConfig,",
"= embed_dim self.num_heads = num_heads self.ff_dim = ff_dim self.dropout_rate = dropout_rate self.is_encoder_decoder =",
"self, transformer_config: GreatTransformerConfig, vocab_size=32000, num_node_types=None, subtokens_per_token=5, num_languages=None, ): super(GreatEncoderConfig, self).__init__() self.transformer_config = transformer_config",
"super(GreatTransformerConfig, self).__init__() self.num_layers = num_layers self.positional_encoding = positional_encoding self.embed_dim = embed_dim self.hidden_dim =",
"): super(GreatEncoderConfig, self).__init__() self.transformer_config = transformer_config self.vocab_size = vocab_size self.num_node_types = num_node_types self.subtokens_per_token",
"class GreatTransformerConfig(ModelConfiguration): def __init__( self, num_layers: int, positional_encoding=None, embed_dim=256, num_heads=8, ff_dim=1024, dropout_rate=0.1, is_encoder_decoder=False,",
"embed_dim self.hidden_dim = embed_dim self.attention_dim = embed_dim self.bias_dim = embed_dim self.num_heads = num_heads",
"embed_dim self.num_heads = num_heads self.ff_dim = ff_dim self.dropout_rate = dropout_rate self.is_encoder_decoder = is_encoder_decoder",
"num_heads self.ff_dim = ff_dim self.dropout_rate = dropout_rate self.is_encoder_decoder = is_encoder_decoder class GreatEncoderConfig(ModelConfiguration): def",
"= transformer_config self.vocab_size = vocab_size self.num_node_types = num_node_types self.subtokens_per_token = subtokens_per_token self.num_languages =",
"<gh_stars>0 from code_transformer.configuration.configuration_utils import ModelConfiguration class GreatTransformerConfig(ModelConfiguration): def __init__( self, num_layers: int, positional_encoding=None,",
"= positional_encoding self.embed_dim = embed_dim self.hidden_dim = embed_dim self.attention_dim = embed_dim self.bias_dim =",
"self.num_layers = num_layers self.positional_encoding = positional_encoding self.embed_dim = embed_dim self.hidden_dim = embed_dim self.attention_dim",
"def __init__( self, transformer_config: GreatTransformerConfig, vocab_size=32000, num_node_types=None, subtokens_per_token=5, num_languages=None, ): super(GreatEncoderConfig, self).__init__() self.transformer_config",
"positional_encoding self.embed_dim = embed_dim self.hidden_dim = embed_dim self.attention_dim = embed_dim self.bias_dim = embed_dim",
"code_transformer.configuration.configuration_utils import ModelConfiguration class GreatTransformerConfig(ModelConfiguration): def __init__( self, num_layers: int, positional_encoding=None, embed_dim=256, num_heads=8,",
"import ModelConfiguration class GreatTransformerConfig(ModelConfiguration): def __init__( self, num_layers: int, positional_encoding=None, embed_dim=256, num_heads=8, ff_dim=1024,",
"ff_dim=1024, dropout_rate=0.1, is_encoder_decoder=False, ): super(GreatTransformerConfig, self).__init__() self.num_layers = num_layers self.positional_encoding = positional_encoding self.embed_dim",
"= ff_dim self.dropout_rate = dropout_rate self.is_encoder_decoder = is_encoder_decoder class GreatEncoderConfig(ModelConfiguration): def __init__( self,",
"= is_encoder_decoder class GreatEncoderConfig(ModelConfiguration): def __init__( self, transformer_config: GreatTransformerConfig, vocab_size=32000, num_node_types=None, subtokens_per_token=5, num_languages=None,",
"= embed_dim self.hidden_dim = embed_dim self.attention_dim = embed_dim self.bias_dim = embed_dim self.num_heads =",
"class GreatEncoderConfig(ModelConfiguration): def __init__( self, transformer_config: GreatTransformerConfig, vocab_size=32000, num_node_types=None, subtokens_per_token=5, num_languages=None, ): super(GreatEncoderConfig,",
"= embed_dim self.bias_dim = embed_dim self.num_heads = num_heads self.ff_dim = ff_dim self.dropout_rate =",
"num_layers: int, positional_encoding=None, embed_dim=256, num_heads=8, ff_dim=1024, dropout_rate=0.1, is_encoder_decoder=False, ): super(GreatTransformerConfig, self).__init__() self.num_layers =",
"ModelConfiguration class GreatTransformerConfig(ModelConfiguration): def __init__( self, num_layers: int, positional_encoding=None, embed_dim=256, num_heads=8, ff_dim=1024, dropout_rate=0.1,",
"__init__( self, transformer_config: GreatTransformerConfig, vocab_size=32000, num_node_types=None, subtokens_per_token=5, num_languages=None, ): super(GreatEncoderConfig, self).__init__() self.transformer_config =",
"= dropout_rate self.is_encoder_decoder = is_encoder_decoder class GreatEncoderConfig(ModelConfiguration): def __init__( self, transformer_config: GreatTransformerConfig, vocab_size=32000,",
"self.bias_dim = embed_dim self.num_heads = num_heads self.ff_dim = ff_dim self.dropout_rate = dropout_rate self.is_encoder_decoder",
"GreatTransformerConfig, vocab_size=32000, num_node_types=None, subtokens_per_token=5, num_languages=None, ): super(GreatEncoderConfig, self).__init__() self.transformer_config = transformer_config self.vocab_size =",
"num_node_types=None, subtokens_per_token=5, num_languages=None, ): super(GreatEncoderConfig, self).__init__() self.transformer_config = transformer_config self.vocab_size = vocab_size self.num_node_types",
"self.attention_dim = embed_dim self.bias_dim = embed_dim self.num_heads = num_heads self.ff_dim = ff_dim self.dropout_rate",
"self.embed_dim = embed_dim self.hidden_dim = embed_dim self.attention_dim = embed_dim self.bias_dim = embed_dim self.num_heads",
"transformer_config self.vocab_size = vocab_size self.num_node_types = num_node_types self.subtokens_per_token = subtokens_per_token self.num_languages = num_languages",
"self).__init__() self.num_layers = num_layers self.positional_encoding = positional_encoding self.embed_dim = embed_dim self.hidden_dim = embed_dim",
"is_encoder_decoder=False, ): super(GreatTransformerConfig, self).__init__() self.num_layers = num_layers self.positional_encoding = positional_encoding self.embed_dim = embed_dim",
"embed_dim=256, num_heads=8, ff_dim=1024, dropout_rate=0.1, is_encoder_decoder=False, ): super(GreatTransformerConfig, self).__init__() self.num_layers = num_layers self.positional_encoding =",
"self.hidden_dim = embed_dim self.attention_dim = embed_dim self.bias_dim = embed_dim self.num_heads = num_heads self.ff_dim",
"num_languages=None, ): super(GreatEncoderConfig, self).__init__() self.transformer_config = transformer_config self.vocab_size = vocab_size self.num_node_types = num_node_types",
"self).__init__() self.transformer_config = transformer_config self.vocab_size = vocab_size self.num_node_types = num_node_types self.subtokens_per_token = subtokens_per_token",
"num_layers self.positional_encoding = positional_encoding self.embed_dim = embed_dim self.hidden_dim = embed_dim self.attention_dim = embed_dim",
"self.num_heads = num_heads self.ff_dim = ff_dim self.dropout_rate = dropout_rate self.is_encoder_decoder = is_encoder_decoder class",
"GreatEncoderConfig(ModelConfiguration): def __init__( self, transformer_config: GreatTransformerConfig, vocab_size=32000, num_node_types=None, subtokens_per_token=5, num_languages=None, ): super(GreatEncoderConfig, self).__init__()",
"__init__( self, num_layers: int, positional_encoding=None, embed_dim=256, num_heads=8, ff_dim=1024, dropout_rate=0.1, is_encoder_decoder=False, ): super(GreatTransformerConfig, self).__init__()",
"embed_dim self.attention_dim = embed_dim self.bias_dim = embed_dim self.num_heads = num_heads self.ff_dim = ff_dim",
"from code_transformer.configuration.configuration_utils import ModelConfiguration class GreatTransformerConfig(ModelConfiguration): def __init__( self, num_layers: int, positional_encoding=None, embed_dim=256,",
"transformer_config: GreatTransformerConfig, vocab_size=32000, num_node_types=None, subtokens_per_token=5, num_languages=None, ): super(GreatEncoderConfig, self).__init__() self.transformer_config = transformer_config self.vocab_size",
"self.transformer_config = transformer_config self.vocab_size = vocab_size self.num_node_types = num_node_types self.subtokens_per_token = subtokens_per_token self.num_languages",
"self.is_encoder_decoder = is_encoder_decoder class GreatEncoderConfig(ModelConfiguration): def __init__( self, transformer_config: GreatTransformerConfig, vocab_size=32000, num_node_types=None, subtokens_per_token=5,",
"embed_dim self.bias_dim = embed_dim self.num_heads = num_heads self.ff_dim = ff_dim self.dropout_rate = dropout_rate",
"GreatTransformerConfig(ModelConfiguration): def __init__( self, num_layers: int, positional_encoding=None, embed_dim=256, num_heads=8, ff_dim=1024, dropout_rate=0.1, is_encoder_decoder=False, ):",
"): super(GreatTransformerConfig, self).__init__() self.num_layers = num_layers self.positional_encoding = positional_encoding self.embed_dim = embed_dim self.hidden_dim",
"subtokens_per_token=5, num_languages=None, ): super(GreatEncoderConfig, self).__init__() self.transformer_config = transformer_config self.vocab_size = vocab_size self.num_node_types =",
"is_encoder_decoder class GreatEncoderConfig(ModelConfiguration): def __init__( self, transformer_config: GreatTransformerConfig, vocab_size=32000, num_node_types=None, subtokens_per_token=5, num_languages=None, ):",
"dropout_rate self.is_encoder_decoder = is_encoder_decoder class GreatEncoderConfig(ModelConfiguration): def __init__( self, transformer_config: GreatTransformerConfig, vocab_size=32000, num_node_types=None,",
"self.ff_dim = ff_dim self.dropout_rate = dropout_rate self.is_encoder_decoder = is_encoder_decoder class GreatEncoderConfig(ModelConfiguration): def __init__(",
"self.positional_encoding = positional_encoding self.embed_dim = embed_dim self.hidden_dim = embed_dim self.attention_dim = embed_dim self.bias_dim",
"num_heads=8, ff_dim=1024, dropout_rate=0.1, is_encoder_decoder=False, ): super(GreatTransformerConfig, self).__init__() self.num_layers = num_layers self.positional_encoding = positional_encoding",
"ff_dim self.dropout_rate = dropout_rate self.is_encoder_decoder = is_encoder_decoder class GreatEncoderConfig(ModelConfiguration): def __init__( self, transformer_config:",
"positional_encoding=None, embed_dim=256, num_heads=8, ff_dim=1024, dropout_rate=0.1, is_encoder_decoder=False, ): super(GreatTransformerConfig, self).__init__() self.num_layers = num_layers self.positional_encoding",
"= embed_dim self.attention_dim = embed_dim self.bias_dim = embed_dim self.num_heads = num_heads self.ff_dim =",
"= num_layers self.positional_encoding = positional_encoding self.embed_dim = embed_dim self.hidden_dim = embed_dim self.attention_dim =",
"dropout_rate=0.1, is_encoder_decoder=False, ): super(GreatTransformerConfig, self).__init__() self.num_layers = num_layers self.positional_encoding = positional_encoding self.embed_dim =",
"= num_heads self.ff_dim = ff_dim self.dropout_rate = dropout_rate self.is_encoder_decoder = is_encoder_decoder class GreatEncoderConfig(ModelConfiguration):",
"vocab_size=32000, num_node_types=None, subtokens_per_token=5, num_languages=None, ): super(GreatEncoderConfig, self).__init__() self.transformer_config = transformer_config self.vocab_size = vocab_size",
"self, num_layers: int, positional_encoding=None, embed_dim=256, num_heads=8, ff_dim=1024, dropout_rate=0.1, is_encoder_decoder=False, ): super(GreatTransformerConfig, self).__init__() self.num_layers",
"super(GreatEncoderConfig, self).__init__() self.transformer_config = transformer_config self.vocab_size = vocab_size self.num_node_types = num_node_types self.subtokens_per_token =",
"self.dropout_rate = dropout_rate self.is_encoder_decoder = is_encoder_decoder class GreatEncoderConfig(ModelConfiguration): def __init__( self, transformer_config: GreatTransformerConfig,"
] |
[
"test_task_3(output_paths): if(output_paths[0] == None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"0.txt\") path_to_correct_output_2 =",
"else: correctness.append(scores[0]) print(\"CORRECTNESS\") print(correctness) return correctness def check_test_case(path_to_team_output, path_to_correct_output, test_case_number): print(\"team_output = \"",
"[4] return [0] def test_task_1_2(output_paths, task_number): if(output_paths[0] == None): return [0] path_to_correct_output_1 =",
"if x_1 == 0 or x_2 == 0: return [4] return [0] def",
"\"Task\"+task_number, \"1.txt\") path_to_correct_output_3 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"2.txt\") path_to_correct_output_4 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"3.txt\")''' x_1",
"os.system(\"diff -wBZ \"+path_to_correct_output_4 + \" \" + output_paths[0] + \"> abc\") # print(\"x_1",
"\"2.txt\") path_to_correct_output_4 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"3.txt\")''' x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \"",
"None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"0.txt\") path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"1.txt\")",
"return [3] x_3 = os.system(\"diff -wBZ \"+path_to_correct_output_3 + \" \" + output_paths[0] +",
"-wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0] + \"> abc\") x_2 = os.system(\"diff",
"i in range(4): score = check_test_case(output_path, (os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, str(i) + \".txt\")), str(i)) if(score",
"\"Task3\", \"1.txt\") x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0] +",
"for i in range(4): score = check_test_case(output_path, (os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, str(i) + \".txt\")), str(i))",
"score = check_test_case(output_path, (os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, str(i) + \".txt\")), str(i)) if(score != 0): scores.append(score)",
"\" + output_paths[0] + \"> abc\") x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \"",
"= os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"0.txt\") '''path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"1.txt\") path_to_correct_output_3 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number,",
"return [2] x_4 = os.system(\"diff -wBZ \"+path_to_correct_output_4 + \" \" + output_paths[0] +",
"== 0: return [2] x_4 = os.system(\"diff -wBZ \"+path_to_correct_output_4 + \" \" +",
"\"Task3\", \"0.txt\") path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"1.txt\") x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 +",
"return [4] return [0] def test_task_1_2(output_paths, task_number): if(output_paths[0] == None): return [0] path_to_correct_output_1",
"'''path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"1.txt\") path_to_correct_output_3 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"2.txt\") path_to_correct_output_4 = os.path.join(SETTERS_OUTPUT_BASE_PATH,",
"test_case = output_paths.index(output_path) + 1 for i in range(4): score = check_test_case(output_path, (os.path.join(SETTERS_OUTPUT_BASE_PATH,",
"import os from hdfs import InsecureClient from .config import * def test_task_3(output_paths): if(output_paths[0]",
"\"Task\"+task_number, \"0.txt\") '''path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"1.txt\") path_to_correct_output_3 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"2.txt\") path_to_correct_output_4",
"os.system(\"diff -wBZ \"+path_to_correct_output_3 + \" \" + output_paths[0] + \"> abc\") if x_3",
"str(x_4)) if x_4 == 0: return [1]''' return [0] def test(output_paths, task_number): correctness",
"= os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"2.txt\") path_to_correct_output_4 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"3.txt\")''' x_1 = os.system(\"diff -wBZ",
"= os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"3.txt\")''' x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" +",
"output_path in output_paths: scores = [] if output_path is None: print(\"SKIPPING TEST CASE",
"= \" + path_to_correct_output) score = 4 - int(test_case_number) x = os.system(\"diff -wBZ",
"None: print(\"SKIPPING TEST CASE \") continue print(output_path) test_case = output_paths.index(output_path) + 1 for",
"correctness def check_test_case(path_to_team_output, path_to_correct_output, test_case_number): print(\"team_output = \" + path_to_team_output) print(\"setters_output = \"",
"range(4): score = check_test_case(output_path, (os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, str(i) + \".txt\")), str(i)) if(score != 0):",
"if(len(scores) == 0): correctness.append(0) else: correctness.append(scores[0]) print(\"CORRECTNESS\") print(correctness) return correctness def check_test_case(path_to_team_output, path_to_correct_output,",
"= [] if output_path is None: print(\"SKIPPING TEST CASE \") continue print(output_path) test_case",
"\"> abc\") # print(\"x_1 = \" + str(x_1)) # print(\"x_2 = \" +",
"+ \" > abc\") print(\"x = \"+str(x)) if x == 0: print(\"TEST CASE",
"x_2 == 0: return [3] x_3 = os.system(\"diff -wBZ \"+path_to_correct_output_3 + \" \"",
"\"+path_to_correct_output_2 + \" \" + output_paths[0] + \"> abc\") if x_2 == 0:",
"test(output_paths, task_number): correctness = [] for output_path in output_paths: scores = [] if",
"\" \" + output_paths[0] + \"> abc\") if x_1 == 0 or x_2",
"== 0 or x_2 == 0: return [4] return [0] def test_task_1_2(output_paths, task_number):",
"\" + output_paths[0] + \"> abc\") if x_2 == 0: return [3] x_3",
"+ \" \" + output_paths[0] + \"> abc\") if x_1 == 0 or",
"0: return [1]''' return [0] def test(output_paths, task_number): correctness = [] for output_path",
"path_to_correct_output, test_case_number): print(\"team_output = \" + path_to_team_output) print(\"setters_output = \" + path_to_correct_output) score",
"return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"0.txt\") '''path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"1.txt\") path_to_correct_output_3",
"return [1]''' return [0] def test(output_paths, task_number): correctness = [] for output_path in",
"path_to_correct_output_3 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"2.txt\") path_to_correct_output_4 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"3.txt\")''' x_1 = os.system(\"diff",
"+ output_paths[0] + \"> abc\") x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \"",
"== None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"0.txt\") path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\",",
"abc\") if x_2 == 0: return [3] x_3 = os.system(\"diff -wBZ \"+path_to_correct_output_3 +",
"print(\"x_4 = \" + str(x_4)) if x_4 == 0: return [1]''' return [0]",
"\".txt\")), str(i)) if(score != 0): scores.append(score) break if(len(scores) == 0): correctness.append(0) else: correctness.append(scores[0])",
"\"> abc\") if x_1 == 0 or x_2 == 0: return [4] return",
"return [0] def test_task_1_2(output_paths, task_number): if(output_paths[0] == None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH,",
"path_to_correct_output_4 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"3.txt\")''' x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \"",
"\"0.txt\") '''path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"1.txt\") path_to_correct_output_3 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"2.txt\") path_to_correct_output_4 =",
"import InsecureClient from .config import * def test_task_3(output_paths): if(output_paths[0] == None): return [0]",
"InsecureClient from .config import * def test_task_3(output_paths): if(output_paths[0] == None): return [0] path_to_correct_output_1",
"return [0] def test(output_paths, task_number): correctness = [] for output_path in output_paths: scores",
"None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"0.txt\") '''path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"1.txt\")",
"[] if output_path is None: print(\"SKIPPING TEST CASE \") continue print(output_path) test_case =",
"abc\") if x_3 == 0: return [2] x_4 = os.system(\"diff -wBZ \"+path_to_correct_output_4 +",
"str(x_1)) # print(\"x_2 = \" + str(x_2)) # print(\"x_3 = \" + str(x_3))",
"\" + output_paths[0] + \"> abc\") if x_3 == 0: return [2] x_4",
"import * def test_task_3(output_paths): if(output_paths[0] == None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\",",
"if output_path is None: print(\"SKIPPING TEST CASE \") continue print(output_path) test_case = output_paths.index(output_path)",
"is None: print(\"SKIPPING TEST CASE \") continue print(output_path) test_case = output_paths.index(output_path) + 1",
"\"1.txt\") x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0] + \">",
"if x_2 == 0: return [3] x_3 = os.system(\"diff -wBZ \"+path_to_correct_output_3 + \"",
"[1]''' return [0] def test(output_paths, task_number): correctness = [] for output_path in output_paths:",
"\" \" + output_paths[0] + \"> abc\") x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 +",
"os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"2.txt\") path_to_correct_output_4 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"3.txt\")''' x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1",
"[0] def test(output_paths, task_number): correctness = [] for output_path in output_paths: scores =",
".config import * def test_task_3(output_paths): if(output_paths[0] == None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH,",
"abc\") # print(\"x_1 = \" + str(x_1)) # print(\"x_2 = \" + str(x_2))",
"os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0] + \"> abc\") x_2 =",
"task_number): correctness = [] for output_path in output_paths: scores = [] if output_path",
"output_paths[0] + \"> abc\") if x_1 == 0 or x_2 == 0: return",
"+ \"> abc\") if x_1 == 0 or x_2 == 0: return [4]",
"== 0: return [3] x_3 = os.system(\"diff -wBZ \"+path_to_correct_output_3 + \" \" +",
"0): scores.append(score) break if(len(scores) == 0): correctness.append(0) else: correctness.append(scores[0]) print(\"CORRECTNESS\") print(correctness) return correctness",
"path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"1.txt\") x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \"",
"or x_2 == 0: return [4] return [0] def test_task_1_2(output_paths, task_number): if(output_paths[0] ==",
"if(score != 0): scores.append(score) break if(len(scores) == 0): correctness.append(0) else: correctness.append(scores[0]) print(\"CORRECTNESS\") print(correctness)",
"def check_test_case(path_to_team_output, path_to_correct_output, test_case_number): print(\"team_output = \" + path_to_team_output) print(\"setters_output = \" +",
"0: print(\"TEST CASE \" + str(test_case_number) + \" PASSED\") return score else: return",
"output_paths[0] + \"> abc\") x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" +",
"# print(\"x_3 = \" + str(x_3)) # print(\"x_4 = \" + str(x_4)) if",
"== 0: return [1]''' return [0] def test(output_paths, task_number): correctness = [] for",
"os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"0.txt\") '''path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"1.txt\") path_to_correct_output_3 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"2.txt\")",
"+ path_to_correct_output) score = 4 - int(test_case_number) x = os.system(\"diff -wBZ \"+path_to_team_output+\" \"+path_to_correct_output",
"\") continue print(output_path) test_case = output_paths.index(output_path) + 1 for i in range(4): score",
"+ output_paths[0] + \"> abc\") if x_3 == 0: return [2] x_4 =",
"\" + str(x_4)) if x_4 == 0: return [1]''' return [0] def test(output_paths,",
"= output_paths.index(output_path) + 1 for i in range(4): score = check_test_case(output_path, (os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number,",
"if x_3 == 0: return [2] x_4 = os.system(\"diff -wBZ \"+path_to_correct_output_4 + \"",
"= [] for output_path in output_paths: scores = [] if output_path is None:",
"- int(test_case_number) x = os.system(\"diff -wBZ \"+path_to_team_output+\" \"+path_to_correct_output + \" > abc\") print(\"x",
"= os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"0.txt\") path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"1.txt\") x_1 = os.system(\"diff -wBZ",
"# print(\"x_1 = \" + str(x_1)) # print(\"x_2 = \" + str(x_2)) #",
"path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"0.txt\") path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"1.txt\") x_1 = os.system(\"diff",
"\" + output_paths[0] + \"> abc\") if x_1 == 0: return [4] '''x_2",
"test_task_1_2(output_paths, task_number): if(output_paths[0] == None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"0.txt\") '''path_to_correct_output_2",
"== None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"0.txt\") '''path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number,",
"= os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0] + \"> abc\") x_2",
"'''x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0] + \"> abc\")",
"+ \" \" + output_paths[0] + \"> abc\") if x_2 == 0: return",
"-wBZ \"+path_to_correct_output_3 + \" \" + output_paths[0] + \"> abc\") if x_3 ==",
"if x_4 == 0: return [1]''' return [0] def test(output_paths, task_number): correctness =",
"[] for output_path in output_paths: scores = [] if output_path is None: print(\"SKIPPING",
"= \" + path_to_team_output) print(\"setters_output = \" + path_to_correct_output) score = 4 -",
"if x == 0: print(\"TEST CASE \" + str(test_case_number) + \" PASSED\") return",
"score = 4 - int(test_case_number) x = os.system(\"diff -wBZ \"+path_to_team_output+\" \"+path_to_correct_output + \"",
"+ 1 for i in range(4): score = check_test_case(output_path, (os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, str(i) +",
"* def test_task_3(output_paths): if(output_paths[0] == None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"0.txt\")",
"str(i) + \".txt\")), str(i)) if(score != 0): scores.append(score) break if(len(scores) == 0): correctness.append(0)",
"in range(4): score = check_test_case(output_path, (os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, str(i) + \".txt\")), str(i)) if(score !=",
"if(output_paths[0] == None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"0.txt\") path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH,",
"x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0] + \"> abc\")",
"output_paths[0] + \"> abc\") # print(\"x_1 = \" + str(x_1)) # print(\"x_2 =",
"\" + output_paths[0] + \"> abc\") # print(\"x_1 = \" + str(x_1)) #",
"\" \" + output_paths[0] + \"> abc\") # print(\"x_1 = \" + str(x_1))",
"output_paths.index(output_path) + 1 for i in range(4): score = check_test_case(output_path, (os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, str(i)",
"\"Task\"+task_number, str(i) + \".txt\")), str(i)) if(score != 0): scores.append(score) break if(len(scores) == 0):",
"x_1 == 0: return [4] '''x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \"",
"\" \" + output_paths[0] + \"> abc\") if x_2 == 0: return [3]",
"return correctness def check_test_case(path_to_team_output, path_to_correct_output, test_case_number): print(\"team_output = \" + path_to_team_output) print(\"setters_output =",
"0: return [3] x_3 = os.system(\"diff -wBZ \"+path_to_correct_output_3 + \" \" + output_paths[0]",
"\"1.txt\") path_to_correct_output_3 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"2.txt\") path_to_correct_output_4 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"3.txt\")''' x_1 =",
"\" \" + output_paths[0] + \"> abc\") if x_1 == 0: return [4]",
"\"> abc\") if x_3 == 0: return [2] x_4 = os.system(\"diff -wBZ \"+path_to_correct_output_4",
"+ \" \" + output_paths[0] + \"> abc\") # print(\"x_1 = \" +",
"= \" + str(x_1)) # print(\"x_2 = \" + str(x_2)) # print(\"x_3 =",
"return [4] '''x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0] +",
"\"0.txt\") path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"1.txt\") x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \"",
"\"> abc\") if x_2 == 0: return [3] x_3 = os.system(\"diff -wBZ \"+path_to_correct_output_3",
"path_to_team_output) print(\"setters_output = \" + path_to_correct_output) score = 4 - int(test_case_number) x =",
"= os.system(\"diff -wBZ \"+path_to_team_output+\" \"+path_to_correct_output + \" > abc\") print(\"x = \"+str(x)) if",
"x = os.system(\"diff -wBZ \"+path_to_team_output+\" \"+path_to_correct_output + \" > abc\") print(\"x = \"+str(x))",
"\"+str(x)) if x == 0: print(\"TEST CASE \" + str(test_case_number) + \" PASSED\")",
"path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"0.txt\") '''path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"1.txt\") path_to_correct_output_3 = os.path.join(SETTERS_OUTPUT_BASE_PATH,",
"\"> abc\") x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0] +",
"+ \"> abc\") if x_1 == 0: return [4] '''x_2 = os.system(\"diff -wBZ",
"= os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"1.txt\") path_to_correct_output_3 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"2.txt\") path_to_correct_output_4 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number,",
"= os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0] + \"> abc\") if",
"for output_path in output_paths: scores = [] if output_path is None: print(\"SKIPPING TEST",
"int(test_case_number) x = os.system(\"diff -wBZ \"+path_to_team_output+\" \"+path_to_correct_output + \" > abc\") print(\"x =",
"+ output_paths[0] + \"> abc\") # print(\"x_1 = \" + str(x_1)) # print(\"x_2",
"print(\"setters_output = \" + path_to_correct_output) score = 4 - int(test_case_number) x = os.system(\"diff",
"\"Task\"+task_number, \"2.txt\") path_to_correct_output_4 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"3.txt\")''' x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 +",
"\"> abc\") if x_1 == 0: return [4] '''x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2",
"x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0] + \"> abc\")",
"= check_test_case(output_path, (os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, str(i) + \".txt\")), str(i)) if(score != 0): scores.append(score) break",
"str(x_2)) # print(\"x_3 = \" + str(x_3)) # print(\"x_4 = \" + str(x_4))",
"[0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"0.txt\") path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"1.txt\") x_1 =",
"x_1 == 0 or x_2 == 0: return [4] return [0] def test_task_1_2(output_paths,",
"-wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0] + \"> abc\") if x_1 ==",
"0 or x_2 == 0: return [4] return [0] def test_task_1_2(output_paths, task_number): if(output_paths[0]",
"= os.system(\"diff -wBZ \"+path_to_correct_output_3 + \" \" + output_paths[0] + \"> abc\") if",
"\"+path_to_correct_output_4 + \" \" + output_paths[0] + \"> abc\") # print(\"x_1 = \"",
"== 0: return [4] '''x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" +",
"= os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0] + \"> abc\") if",
"+ \" \" + output_paths[0] + \"> abc\") if x_3 == 0: return",
"correctness.append(0) else: correctness.append(scores[0]) print(\"CORRECTNESS\") print(correctness) return correctness def check_test_case(path_to_team_output, path_to_correct_output, test_case_number): print(\"team_output =",
"0: return [4] return [0] def test_task_1_2(output_paths, task_number): if(output_paths[0] == None): return [0]",
"[4] '''x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0] + \">",
"-wBZ \"+path_to_correct_output_4 + \" \" + output_paths[0] + \"> abc\") # print(\"x_1 =",
"os.system(\"diff -wBZ \"+path_to_team_output+\" \"+path_to_correct_output + \" > abc\") print(\"x = \"+str(x)) if x",
"str(x_3)) # print(\"x_4 = \" + str(x_4)) if x_4 == 0: return [1]'''",
"abc\") print(\"x = \"+str(x)) if x == 0: print(\"TEST CASE \" + str(test_case_number)",
"x_2 == 0: return [4] return [0] def test_task_1_2(output_paths, task_number): if(output_paths[0] == None):",
"\"+path_to_correct_output_1 + \" \" + output_paths[0] + \"> abc\") x_2 = os.system(\"diff -wBZ",
"\"3.txt\")''' x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0] + \">",
"\" > abc\") print(\"x = \"+str(x)) if x == 0: print(\"TEST CASE \"",
"test_case_number): print(\"team_output = \" + path_to_team_output) print(\"setters_output = \" + path_to_correct_output) score =",
"def test_task_3(output_paths): if(output_paths[0] == None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"0.txt\") path_to_correct_output_2",
"from hdfs import InsecureClient from .config import * def test_task_3(output_paths): if(output_paths[0] == None):",
"print(\"x = \"+str(x)) if x == 0: print(\"TEST CASE \" + str(test_case_number) +",
"os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"1.txt\") path_to_correct_output_3 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"2.txt\") path_to_correct_output_4 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"3.txt\")'''",
"check_test_case(path_to_team_output, path_to_correct_output, test_case_number): print(\"team_output = \" + path_to_team_output) print(\"setters_output = \" + path_to_correct_output)",
"[2] x_4 = os.system(\"diff -wBZ \"+path_to_correct_output_4 + \" \" + output_paths[0] + \">",
"output_paths[0] + \"> abc\") if x_3 == 0: return [2] x_4 = os.system(\"diff",
"(os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, str(i) + \".txt\")), str(i)) if(score != 0): scores.append(score) break if(len(scores) ==",
"= \" + str(x_3)) # print(\"x_4 = \" + str(x_4)) if x_4 ==",
"os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0] + \"> abc\") if x_1",
"== 0: return [4] return [0] def test_task_1_2(output_paths, task_number): if(output_paths[0] == None): return",
"# print(\"x_2 = \" + str(x_2)) # print(\"x_3 = \" + str(x_3)) #",
"= \" + str(x_4)) if x_4 == 0: return [1]''' return [0] def",
"x_4 == 0: return [1]''' return [0] def test(output_paths, task_number): correctness = []",
"check_test_case(output_path, (os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, str(i) + \".txt\")), str(i)) if(score != 0): scores.append(score) break if(len(scores)",
"str(i)) if(score != 0): scores.append(score) break if(len(scores) == 0): correctness.append(0) else: correctness.append(scores[0]) print(\"CORRECTNESS\")",
"output_paths[0] + \"> abc\") if x_2 == 0: return [3] x_3 = os.system(\"diff",
"os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"0.txt\") path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"1.txt\") x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1",
"TEST CASE \") continue print(output_path) test_case = output_paths.index(output_path) + 1 for i in",
"\" + output_paths[0] + \"> abc\") if x_1 == 0 or x_2 ==",
"x_3 = os.system(\"diff -wBZ \"+path_to_correct_output_3 + \" \" + output_paths[0] + \"> abc\")",
"os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0] + \"> abc\") if x_2",
"scores = [] if output_path is None: print(\"SKIPPING TEST CASE \") continue print(output_path)",
"os from hdfs import InsecureClient from .config import * def test_task_3(output_paths): if(output_paths[0] ==",
"\"+path_to_team_output+\" \"+path_to_correct_output + \" > abc\") print(\"x = \"+str(x)) if x == 0:",
"== 0): correctness.append(0) else: correctness.append(scores[0]) print(\"CORRECTNESS\") print(correctness) return correctness def check_test_case(path_to_team_output, path_to_correct_output, test_case_number):",
"\"+path_to_correct_output_1 + \" \" + output_paths[0] + \"> abc\") if x_1 == 0:",
"os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"1.txt\") x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0]",
"-wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0] + \"> abc\") if x_2 ==",
"+ \".txt\")), str(i)) if(score != 0): scores.append(score) break if(len(scores) == 0): correctness.append(0) else:",
"-wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0] + \"> abc\") if x_1 ==",
"print(\"CORRECTNESS\") print(correctness) return correctness def check_test_case(path_to_team_output, path_to_correct_output, test_case_number): print(\"team_output = \" + path_to_team_output)",
"= \"+str(x)) if x == 0: print(\"TEST CASE \" + str(test_case_number) + \"",
"+ str(x_3)) # print(\"x_4 = \" + str(x_4)) if x_4 == 0: return",
"[3] x_3 = os.system(\"diff -wBZ \"+path_to_correct_output_3 + \" \" + output_paths[0] + \">",
"= \" + str(x_2)) # print(\"x_3 = \" + str(x_3)) # print(\"x_4 =",
"return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"0.txt\") path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"1.txt\") x_1",
"abc\") if x_1 == 0 or x_2 == 0: return [4] return [0]",
"if(output_paths[0] == None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"0.txt\") '''path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH,",
"output_paths[0] + \"> abc\") if x_1 == 0: return [4] '''x_2 = os.system(\"diff",
"[0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"0.txt\") '''path_to_correct_output_2 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"1.txt\") path_to_correct_output_3 =",
"print(\"team_output = \" + path_to_team_output) print(\"setters_output = \" + path_to_correct_output) score = 4",
"\"+path_to_correct_output + \" > abc\") print(\"x = \"+str(x)) if x == 0: print(\"TEST",
"\" + str(x_2)) # print(\"x_3 = \" + str(x_3)) # print(\"x_4 = \"",
"+ \" \" + output_paths[0] + \"> abc\") x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2",
"-wBZ \"+path_to_team_output+\" \"+path_to_correct_output + \" > abc\") print(\"x = \"+str(x)) if x ==",
"x == 0: print(\"TEST CASE \" + str(test_case_number) + \" PASSED\") return score",
"> abc\") print(\"x = \"+str(x)) if x == 0: print(\"TEST CASE \" +",
"0: return [2] x_4 = os.system(\"diff -wBZ \"+path_to_correct_output_4 + \" \" + output_paths[0]",
"def test(output_paths, task_number): correctness = [] for output_path in output_paths: scores = []",
"4 - int(test_case_number) x = os.system(\"diff -wBZ \"+path_to_team_output+\" \"+path_to_correct_output + \" > abc\")",
"print(output_path) test_case = output_paths.index(output_path) + 1 for i in range(4): score = check_test_case(output_path,",
"\" + path_to_team_output) print(\"setters_output = \" + path_to_correct_output) score = 4 - int(test_case_number)",
"+ path_to_team_output) print(\"setters_output = \" + path_to_correct_output) score = 4 - int(test_case_number) x",
"\"+path_to_correct_output_2 + \" \" + output_paths[0] + \"> abc\") if x_1 == 0",
"+ output_paths[0] + \"> abc\") if x_1 == 0: return [4] '''x_2 =",
"continue print(output_path) test_case = output_paths.index(output_path) + 1 for i in range(4): score =",
"+ output_paths[0] + \"> abc\") if x_1 == 0 or x_2 == 0:",
"\" + str(x_3)) # print(\"x_4 = \" + str(x_4)) if x_4 == 0:",
"+ output_paths[0] + \"> abc\") if x_2 == 0: return [3] x_3 =",
"= os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task3\", \"1.txt\") x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" +",
"path_to_correct_output) score = 4 - int(test_case_number) x = os.system(\"diff -wBZ \"+path_to_team_output+\" \"+path_to_correct_output +",
"print(\"x_2 = \" + str(x_2)) # print(\"x_3 = \" + str(x_3)) # print(\"x_4",
"0): correctness.append(0) else: correctness.append(scores[0]) print(\"CORRECTNESS\") print(correctness) return correctness def check_test_case(path_to_team_output, path_to_correct_output, test_case_number): print(\"team_output",
"output_path is None: print(\"SKIPPING TEST CASE \") continue print(output_path) test_case = output_paths.index(output_path) +",
"print(correctness) return correctness def check_test_case(path_to_team_output, path_to_correct_output, test_case_number): print(\"team_output = \" + path_to_team_output) print(\"setters_output",
"correctness = [] for output_path in output_paths: scores = [] if output_path is",
"= 4 - int(test_case_number) x = os.system(\"diff -wBZ \"+path_to_team_output+\" \"+path_to_correct_output + \" >",
"\"+path_to_correct_output_3 + \" \" + output_paths[0] + \"> abc\") if x_3 == 0:",
"abc\") if x_1 == 0: return [4] '''x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 +",
"+ \"> abc\") x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0]",
"task_number): if(output_paths[0] == None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"0.txt\") '''path_to_correct_output_2 =",
"correctness.append(scores[0]) print(\"CORRECTNESS\") print(correctness) return correctness def check_test_case(path_to_team_output, path_to_correct_output, test_case_number): print(\"team_output = \" +",
"\" + path_to_correct_output) score = 4 - int(test_case_number) x = os.system(\"diff -wBZ \"+path_to_team_output+\"",
"os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0] + \"> abc\") if x_1",
"print(\"x_1 = \" + str(x_1)) # print(\"x_2 = \" + str(x_2)) # print(\"x_3",
"0: return [4] '''x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0]",
"hdfs import InsecureClient from .config import * def test_task_3(output_paths): if(output_paths[0] == None): return",
"x_3 == 0: return [2] x_4 = os.system(\"diff -wBZ \"+path_to_correct_output_4 + \" \"",
"!= 0): scores.append(score) break if(len(scores) == 0): correctness.append(0) else: correctness.append(scores[0]) print(\"CORRECTNESS\") print(correctness) return",
"print(\"x_3 = \" + str(x_3)) # print(\"x_4 = \" + str(x_4)) if x_4",
"+ \"> abc\") if x_2 == 0: return [3] x_3 = os.system(\"diff -wBZ",
"+ str(x_4)) if x_4 == 0: return [1]''' return [0] def test(output_paths, task_number):",
"+ \" \" + output_paths[0] + \"> abc\") if x_1 == 0: return",
"= os.system(\"diff -wBZ \"+path_to_correct_output_4 + \" \" + output_paths[0] + \"> abc\") #",
"\" + str(x_1)) # print(\"x_2 = \" + str(x_2)) # print(\"x_3 = \"",
"+ str(x_1)) # print(\"x_2 = \" + str(x_2)) # print(\"x_3 = \" +",
"[0] def test_task_1_2(output_paths, task_number): if(output_paths[0] == None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number,",
"def test_task_1_2(output_paths, task_number): if(output_paths[0] == None): return [0] path_to_correct_output_1 = os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"0.txt\")",
"print(\"SKIPPING TEST CASE \") continue print(output_path) test_case = output_paths.index(output_path) + 1 for i",
"\"Task\"+task_number, \"3.txt\")''' x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0] +",
"+ \"> abc\") if x_3 == 0: return [2] x_4 = os.system(\"diff -wBZ",
"in output_paths: scores = [] if output_path is None: print(\"SKIPPING TEST CASE \")",
"output_paths: scores = [] if output_path is None: print(\"SKIPPING TEST CASE \") continue",
"+ \"> abc\") # print(\"x_1 = \" + str(x_1)) # print(\"x_2 = \"",
"CASE \") continue print(output_path) test_case = output_paths.index(output_path) + 1 for i in range(4):",
"== 0: print(\"TEST CASE \" + str(test_case_number) + \" PASSED\") return score else:",
"print(\"TEST CASE \" + str(test_case_number) + \" PASSED\") return score else: return 0",
"from .config import * def test_task_3(output_paths): if(output_paths[0] == None): return [0] path_to_correct_output_1 =",
"<reponame>IamMayankThakur/test-bigdata<filename>adminmgr/testmgr/testoutput.py import os from hdfs import InsecureClient from .config import * def test_task_3(output_paths):",
"if x_1 == 0: return [4] '''x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \"",
"scores.append(score) break if(len(scores) == 0): correctness.append(0) else: correctness.append(scores[0]) print(\"CORRECTNESS\") print(correctness) return correctness def",
"os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, \"3.txt\")''' x_1 = os.system(\"diff -wBZ \"+path_to_correct_output_1 + \" \" + output_paths[0]",
"\" \" + output_paths[0] + \"> abc\") if x_3 == 0: return [2]",
"x_4 = os.system(\"diff -wBZ \"+path_to_correct_output_4 + \" \" + output_paths[0] + \"> abc\")",
"+ str(x_2)) # print(\"x_3 = \" + str(x_3)) # print(\"x_4 = \" +",
"# print(\"x_4 = \" + str(x_4)) if x_4 == 0: return [1]''' return",
"break if(len(scores) == 0): correctness.append(0) else: correctness.append(scores[0]) print(\"CORRECTNESS\") print(correctness) return correctness def check_test_case(path_to_team_output,",
"abc\") x_2 = os.system(\"diff -wBZ \"+path_to_correct_output_2 + \" \" + output_paths[0] + \">",
"1 for i in range(4): score = check_test_case(output_path, (os.path.join(SETTERS_OUTPUT_BASE_PATH, \"Task\"+task_number, str(i) + \".txt\")),"
] |
[
"# allow setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name=\"django-trello-webhooks\",",
":: 2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP ::",
"= open(os.path.join(os.path.dirname(__file__), 'README.rst')).read() # allow setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__),",
"author='<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>', classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'Intended",
"Language :: Python', 'Programming Language :: Python :: 2.7', 'Topic :: Internet ::",
"maintainer='<NAME>', maintainer_email='<EMAIL>', classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience ::",
"Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Topic ::",
"maintainer_email='<EMAIL>', classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers',",
"'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming",
"path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name=\"django-trello-webhooks\", version=\"0.3\", packages=[ 'trello_webhooks', 'trello_webhooks.management', 'trello_webhooks.management.commands', 'trello_webhooks.migrations', 'trello_webhooks.templatetags', 'trello_webhooks.tests',",
"'trello_webhooks.management.commands', 'trello_webhooks.migrations', 'trello_webhooks.templatetags', 'trello_webhooks.tests', ], install_requires=['django>=1.7.1'], include_package_data=True, description='Django Trello Webhooks - Trello callback",
"setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name=\"django-trello-webhooks\", version=\"0.3\", packages=[",
":: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License',",
"'trello_webhooks.templatetags', 'trello_webhooks.tests', ], install_requires=['django>=1.7.1'], include_package_data=True, description='Django Trello Webhooks - Trello callback integration for",
"classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License",
"README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read() # allow setup.py to be run from any path",
"setuptools import setup README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read() # allow setup.py to be run",
":: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet ::",
"OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Topic",
"include_package_data=True, description='Django Trello Webhooks - Trello callback integration for Django.', long_description=README, url='https://github.com/yunojuno/django-trello-webhooks', author='<NAME>',",
"System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python ::",
"Django.', long_description=README, url='https://github.com/yunojuno/django-trello-webhooks', author='<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>', classifiers=[ 'Environment :: Web Environment', 'Framework",
"'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT",
":: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming",
":: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS",
"2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: Dynamic",
"open(os.path.join(os.path.dirname(__file__), 'README.rst')).read() # allow setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))",
"Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating",
"allow setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name=\"django-trello-webhooks\", version=\"0.3\",",
"Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System ::",
"Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', ], )",
"author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>', classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience",
"os.pardir))) setup( name=\"django-trello-webhooks\", version=\"0.3\", packages=[ 'trello_webhooks', 'trello_webhooks.management', 'trello_webhooks.management.commands', 'trello_webhooks.migrations', 'trello_webhooks.templatetags', 'trello_webhooks.tests', ], install_requires=['django>=1.7.1'],",
"import setup README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read() # allow setup.py to be run from",
"for Django.', long_description=README, url='https://github.com/yunojuno/django-trello-webhooks', author='<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>', classifiers=[ 'Environment :: Web Environment',",
"'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System",
"'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',",
":: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI",
"Python', 'Programming Language :: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic",
"'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python",
"from setuptools import setup README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read() # allow setup.py to be",
"'Programming Language :: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic ::",
"install_requires=['django>=1.7.1'], include_package_data=True, description='Django Trello Webhooks - Trello callback integration for Django.', long_description=README, url='https://github.com/yunojuno/django-trello-webhooks',",
"Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python',",
"MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language",
"License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language ::",
"long_description=README, url='https://github.com/yunojuno/django-trello-webhooks', author='<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>', classifiers=[ 'Environment :: Web Environment', 'Framework ::",
"setup( name=\"django-trello-webhooks\", version=\"0.3\", packages=[ 'trello_webhooks', 'trello_webhooks.management', 'trello_webhooks.management.commands', 'trello_webhooks.migrations', 'trello_webhooks.templatetags', 'trello_webhooks.tests', ], install_requires=['django>=1.7.1'], include_package_data=True,",
"Webhooks - Trello callback integration for Django.', long_description=README, url='https://github.com/yunojuno/django-trello-webhooks', author='<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>',",
"'trello_webhooks.migrations', 'trello_webhooks.templatetags', 'trello_webhooks.tests', ], install_requires=['django>=1.7.1'], include_package_data=True, description='Django Trello Webhooks - Trello callback integration",
"OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language ::",
"'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License ::",
"os from setuptools import setup README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read() # allow setup.py to",
"name=\"django-trello-webhooks\", version=\"0.3\", packages=[ 'trello_webhooks', 'trello_webhooks.management', 'trello_webhooks.management.commands', 'trello_webhooks.migrations', 'trello_webhooks.templatetags', 'trello_webhooks.tests', ], install_requires=['django>=1.7.1'], include_package_data=True, description='Django",
"run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name=\"django-trello-webhooks\", version=\"0.3\", packages=[ 'trello_webhooks', 'trello_webhooks.management', 'trello_webhooks.management.commands',",
"setup README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read() # allow setup.py to be run from any",
"Trello callback integration for Django.', long_description=README, url='https://github.com/yunojuno/django-trello-webhooks', author='<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>', classifiers=[ 'Environment",
":: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7',",
"Trello Webhooks - Trello callback integration for Django.', long_description=README, url='https://github.com/yunojuno/django-trello-webhooks', author='<NAME>', author_email='<EMAIL>', maintainer='<NAME>',",
"from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name=\"django-trello-webhooks\", version=\"0.3\", packages=[ 'trello_webhooks', 'trello_webhooks.management', 'trello_webhooks.management.commands', 'trello_webhooks.migrations',",
"packages=[ 'trello_webhooks', 'trello_webhooks.management', 'trello_webhooks.management.commands', 'trello_webhooks.migrations', 'trello_webhooks.templatetags', 'trello_webhooks.tests', ], install_requires=['django>=1.7.1'], include_package_data=True, description='Django Trello Webhooks",
"Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent',",
"'trello_webhooks.tests', ], install_requires=['django>=1.7.1'], include_package_data=True, description='Django Trello Webhooks - Trello callback integration for Django.',",
"be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name=\"django-trello-webhooks\", version=\"0.3\", packages=[ 'trello_webhooks', 'trello_webhooks.management',",
"'trello_webhooks', 'trello_webhooks.management', 'trello_webhooks.management.commands', 'trello_webhooks.migrations', 'trello_webhooks.templatetags', 'trello_webhooks.tests', ], install_requires=['django>=1.7.1'], include_package_data=True, description='Django Trello Webhooks -",
"'README.rst')).read() # allow setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup(",
":: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', ],",
":: Python', 'Programming Language :: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP',",
"import os from setuptools import setup README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read() # allow setup.py",
"Language :: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet",
"description='Django Trello Webhooks - Trello callback integration for Django.', long_description=README, url='https://github.com/yunojuno/django-trello-webhooks', author='<NAME>', author_email='<EMAIL>',",
"], install_requires=['django>=1.7.1'], include_package_data=True, description='Django Trello Webhooks - Trello callback integration for Django.', long_description=README,",
"os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name=\"django-trello-webhooks\", version=\"0.3\", packages=[ 'trello_webhooks', 'trello_webhooks.management', 'trello_webhooks.management.commands', 'trello_webhooks.migrations', 'trello_webhooks.templatetags', 'trello_webhooks.tests', ],",
"'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Topic :: Internet",
"any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name=\"django-trello-webhooks\", version=\"0.3\", packages=[ 'trello_webhooks', 'trello_webhooks.management', 'trello_webhooks.management.commands', 'trello_webhooks.migrations', 'trello_webhooks.templatetags',",
"version=\"0.3\", packages=[ 'trello_webhooks', 'trello_webhooks.management', 'trello_webhooks.management.commands', 'trello_webhooks.migrations', 'trello_webhooks.templatetags', 'trello_webhooks.tests', ], install_requires=['django>=1.7.1'], include_package_data=True, description='Django Trello",
"url='https://github.com/yunojuno/django-trello-webhooks', author='<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>', classifiers=[ 'Environment :: Web Environment', 'Framework :: Django',",
"Python :: 2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP",
":: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language",
"Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved ::",
"- Trello callback integration for Django.', long_description=README, url='https://github.com/yunojuno/django-trello-webhooks', author='<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>', classifiers=[",
"'trello_webhooks.management', 'trello_webhooks.management.commands', 'trello_webhooks.migrations', 'trello_webhooks.templatetags', 'trello_webhooks.tests', ], install_requires=['django>=1.7.1'], include_package_data=True, description='Django Trello Webhooks - Trello",
"Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved",
"integration for Django.', long_description=README, url='https://github.com/yunojuno/django-trello-webhooks', author='<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>', classifiers=[ 'Environment :: Web",
"callback integration for Django.', long_description=README, url='https://github.com/yunojuno/django-trello-webhooks', author='<NAME>', author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>', classifiers=[ 'Environment ::",
"to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name=\"django-trello-webhooks\", version=\"0.3\", packages=[ 'trello_webhooks',"
] |
[
"# subtract item bias for negative inner product items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all()",
"item_biases, item_factors = model.get_item_representations() users = [dict(id=i, factors=factors) for i, factors in enumerate(user_factors)]",
"factors = Column(Vector(20)) class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) title",
"Column(Float) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) data = fetch_movielens(min_rating=5.0) model = LightFM(loss='warp', no_components=20) model.fit(data['train'], epochs=30) user_biases,",
"= [dict(id=i, factors=factors) for i, factors in enumerate(user_factors)] items = [dict(id=i, title=data['item_labels'][i], factors=factors,",
"with engine.connect() as conn: conn.execute(text('CREATE EXTENSION IF NOT EXISTS vector')) conn.commit() Base =",
"epochs=30) user_biases, user_factors = model.get_user_representations() item_biases, item_factors = model.get_item_representations() users = [dict(id=i, factors=factors)",
"text, Column, Float, Integer, String from sqlalchemy.orm import declarative_base, Session engine = create_engine('postgresql+psycopg2://localhost/pgvector_example',",
"id = Column(Integer, primary_key=True) factors = Column(Vector(20)) class Item(Base): __tablename__ = 'item' id",
"bias for negative inner product items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all() print('user-based recs:', [item.title",
"primary_key=True) factors = Column(Vector(20)) class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True)",
"Item.bias).limit(5).all() print('user-based recs:', [item.title for item in items]) item = session.query(Item).filter(Item.title == 'Star",
"= model.get_item_representations() users = [dict(id=i, factors=factors) for i, factors in enumerate(user_factors)] items =",
"future=True) with engine.connect() as conn: conn.execute(text('CREATE EXTENSION IF NOT EXISTS vector')) conn.commit() Base",
"item bias for negative inner product items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all() print('user-based recs:',",
"users) session.bulk_insert_mappings(Item, items) session.commit() user = session.query(User).get(1) # subtract item bias for negative",
"print('user-based recs:', [item.title for item in items]) item = session.query(Item).filter(Item.title == 'Star Wars",
"EXISTS vector')) conn.commit() Base = declarative_base() class User(Base): __tablename__ = 'user' id =",
"Wars (1977)').first() items = session.query(Item).filter(Item.id != item.id).order_by(Item.factors.cosine_distance(item.factors)).limit(5).all() print('item-based recs:', [item.title for item in",
"conn.commit() Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True)",
"enumerate(user_factors)] items = [dict(id=i, title=data['item_labels'][i], factors=factors, bias=item_biases[i].item()) for i, factors in enumerate(item_factors)] session",
"= [dict(id=i, title=data['item_labels'][i], factors=factors, bias=item_biases[i].item()) for i, factors in enumerate(item_factors)] session = Session(engine)",
"import declarative_base, Session engine = create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True) with engine.connect() as conn: conn.execute(text('CREATE EXTENSION",
"primary_key=True) title = Column(String) factors = Column(Vector(20)) bias = Column(Float) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) data",
"LightFM from lightfm.datasets import fetch_movielens from pgvector.sqlalchemy import Vector from sqlalchemy import create_engine,",
"[dict(id=i, factors=factors) for i, factors in enumerate(user_factors)] items = [dict(id=i, title=data['item_labels'][i], factors=factors, bias=item_biases[i].item())",
"recs:', [item.title for item in items]) item = session.query(Item).filter(Item.title == 'Star Wars (1977)').first()",
"= session.query(Item).filter(Item.title == 'Star Wars (1977)').first() items = session.query(Item).filter(Item.id != item.id).order_by(Item.factors.cosine_distance(item.factors)).limit(5).all() print('item-based recs:',",
"session.query(User).get(1) # subtract item bias for negative inner product items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors) -",
"lightfm.datasets import fetch_movielens from pgvector.sqlalchemy import Vector from sqlalchemy import create_engine, text, Column,",
"factors in enumerate(user_factors)] items = [dict(id=i, title=data['item_labels'][i], factors=factors, bias=item_biases[i].item()) for i, factors in",
"= Session(engine) session.bulk_insert_mappings(User, users) session.bulk_insert_mappings(Item, items) session.commit() user = session.query(User).get(1) # subtract item",
"[item.title for item in items]) item = session.query(Item).filter(Item.title == 'Star Wars (1977)').first() items",
"'Star Wars (1977)').first() items = session.query(Item).filter(Item.id != item.id).order_by(Item.factors.cosine_distance(item.factors)).limit(5).all() print('item-based recs:', [item.title for item",
"Session(engine) session.bulk_insert_mappings(User, users) session.bulk_insert_mappings(Item, items) session.commit() user = session.query(User).get(1) # subtract item bias",
"create_engine, text, Column, Float, Integer, String from sqlalchemy.orm import declarative_base, Session engine =",
"session.bulk_insert_mappings(User, users) session.bulk_insert_mappings(Item, items) session.commit() user = session.query(User).get(1) # subtract item bias for",
"enumerate(item_factors)] session = Session(engine) session.bulk_insert_mappings(User, users) session.bulk_insert_mappings(Item, items) session.commit() user = session.query(User).get(1) #",
"items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all() print('user-based recs:', [item.title for item in items]) item",
"user_factors = model.get_user_representations() item_biases, item_factors = model.get_item_representations() users = [dict(id=i, factors=factors) for i,",
"create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True) with engine.connect() as conn: conn.execute(text('CREATE EXTENSION IF NOT EXISTS vector')) conn.commit()",
"session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all() print('user-based recs:', [item.title for item in items]) item = session.query(Item).filter(Item.title",
"= Column(Integer, primary_key=True) title = Column(String) factors = Column(Vector(20)) bias = Column(Float) Base.metadata.drop_all(engine)",
"__tablename__ = 'user' id = Column(Integer, primary_key=True) factors = Column(Vector(20)) class Item(Base): __tablename__",
"in enumerate(user_factors)] items = [dict(id=i, title=data['item_labels'][i], factors=factors, bias=item_biases[i].item()) for i, factors in enumerate(item_factors)]",
"session.bulk_insert_mappings(Item, items) session.commit() user = session.query(User).get(1) # subtract item bias for negative inner",
"in items]) item = session.query(Item).filter(Item.title == 'Star Wars (1977)').first() items = session.query(Item).filter(Item.id !=",
"items) session.commit() user = session.query(User).get(1) # subtract item bias for negative inner product",
"- Item.bias).limit(5).all() print('user-based recs:', [item.title for item in items]) item = session.query(Item).filter(Item.title ==",
"bias = Column(Float) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) data = fetch_movielens(min_rating=5.0) model = LightFM(loss='warp', no_components=20) model.fit(data['train'],",
"EXTENSION IF NOT EXISTS vector')) conn.commit() Base = declarative_base() class User(Base): __tablename__ =",
"Float, Integer, String from sqlalchemy.orm import declarative_base, Session engine = create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True) with",
"class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) factors = Column(Vector(20)) class",
"class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) title = Column(String) factors",
"i, factors in enumerate(item_factors)] session = Session(engine) session.bulk_insert_mappings(User, users) session.bulk_insert_mappings(Item, items) session.commit() user",
"declarative_base, Session engine = create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True) with engine.connect() as conn: conn.execute(text('CREATE EXTENSION IF",
"import create_engine, text, Column, Float, Integer, String from sqlalchemy.orm import declarative_base, Session engine",
"IF NOT EXISTS vector')) conn.commit() Base = declarative_base() class User(Base): __tablename__ = 'user'",
"= Column(Vector(20)) class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) title =",
"title = Column(String) factors = Column(Vector(20)) bias = Column(Float) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) data =",
"factors=factors, bias=item_biases[i].item()) for i, factors in enumerate(item_factors)] session = Session(engine) session.bulk_insert_mappings(User, users) session.bulk_insert_mappings(Item,",
"id = Column(Integer, primary_key=True) title = Column(String) factors = Column(Vector(20)) bias = Column(Float)",
"Column(String) factors = Column(Vector(20)) bias = Column(Float) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) data = fetch_movielens(min_rating=5.0) model",
"= create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True) with engine.connect() as conn: conn.execute(text('CREATE EXTENSION IF NOT EXISTS vector'))",
"import fetch_movielens from pgvector.sqlalchemy import Vector from sqlalchemy import create_engine, text, Column, Float,",
"= Column(Float) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) data = fetch_movielens(min_rating=5.0) model = LightFM(loss='warp', no_components=20) model.fit(data['train'], epochs=30)",
"import Vector from sqlalchemy import create_engine, text, Column, Float, Integer, String from sqlalchemy.orm",
"from sqlalchemy.orm import declarative_base, Session engine = create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True) with engine.connect() as conn:",
"conn: conn.execute(text('CREATE EXTENSION IF NOT EXISTS vector')) conn.commit() Base = declarative_base() class User(Base):",
"factors=factors) for i, factors in enumerate(user_factors)] items = [dict(id=i, title=data['item_labels'][i], factors=factors, bias=item_biases[i].item()) for",
"= Column(Vector(20)) bias = Column(Float) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) data = fetch_movielens(min_rating=5.0) model = LightFM(loss='warp',",
"Base.metadata.create_all(engine) data = fetch_movielens(min_rating=5.0) model = LightFM(loss='warp', no_components=20) model.fit(data['train'], epochs=30) user_biases, user_factors =",
"in enumerate(item_factors)] session = Session(engine) session.bulk_insert_mappings(User, users) session.bulk_insert_mappings(Item, items) session.commit() user = session.query(User).get(1)",
"session.commit() user = session.query(User).get(1) # subtract item bias for negative inner product items",
"from pgvector.sqlalchemy import Vector from sqlalchemy import create_engine, text, Column, Float, Integer, String",
"user = session.query(User).get(1) # subtract item bias for negative inner product items =",
"LightFM(loss='warp', no_components=20) model.fit(data['train'], epochs=30) user_biases, user_factors = model.get_user_representations() item_biases, item_factors = model.get_item_representations() users",
"vector')) conn.commit() Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer,",
"sqlalchemy.orm import declarative_base, Session engine = create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True) with engine.connect() as conn: conn.execute(text('CREATE",
"subtract item bias for negative inner product items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all() print('user-based",
"__tablename__ = 'item' id = Column(Integer, primary_key=True) title = Column(String) factors = Column(Vector(20))",
"users = [dict(id=i, factors=factors) for i, factors in enumerate(user_factors)] items = [dict(id=i, title=data['item_labels'][i],",
"(1977)').first() items = session.query(Item).filter(Item.id != item.id).order_by(Item.factors.cosine_distance(item.factors)).limit(5).all() print('item-based recs:', [item.title for item in items])",
"title=data['item_labels'][i], factors=factors, bias=item_biases[i].item()) for i, factors in enumerate(item_factors)] session = Session(engine) session.bulk_insert_mappings(User, users)",
"= 'user' id = Column(Integer, primary_key=True) factors = Column(Vector(20)) class Item(Base): __tablename__ =",
"declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) factors = Column(Vector(20))",
"conn.execute(text('CREATE EXTENSION IF NOT EXISTS vector')) conn.commit() Base = declarative_base() class User(Base): __tablename__",
"Column(Integer, primary_key=True) factors = Column(Vector(20)) class Item(Base): __tablename__ = 'item' id = Column(Integer,",
"NOT EXISTS vector')) conn.commit() Base = declarative_base() class User(Base): __tablename__ = 'user' id",
"Column(Vector(20)) class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) title = Column(String)",
"for i, factors in enumerate(user_factors)] items = [dict(id=i, title=data['item_labels'][i], factors=factors, bias=item_biases[i].item()) for i,",
"Base.metadata.drop_all(engine) Base.metadata.create_all(engine) data = fetch_movielens(min_rating=5.0) model = LightFM(loss='warp', no_components=20) model.fit(data['train'], epochs=30) user_biases, user_factors",
"model.fit(data['train'], epochs=30) user_biases, user_factors = model.get_user_representations() item_biases, item_factors = model.get_item_representations() users = [dict(id=i,",
"Session engine = create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True) with engine.connect() as conn: conn.execute(text('CREATE EXTENSION IF NOT",
"= LightFM(loss='warp', no_components=20) model.fit(data['train'], epochs=30) user_biases, user_factors = model.get_user_representations() item_biases, item_factors = model.get_item_representations()",
"inner product items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all() print('user-based recs:', [item.title for item in",
"from lightfm import LightFM from lightfm.datasets import fetch_movielens from pgvector.sqlalchemy import Vector from",
"user_biases, user_factors = model.get_user_representations() item_biases, item_factors = model.get_item_representations() users = [dict(id=i, factors=factors) for",
"no_components=20) model.fit(data['train'], epochs=30) user_biases, user_factors = model.get_user_representations() item_biases, item_factors = model.get_item_representations() users =",
"pgvector.sqlalchemy import Vector from sqlalchemy import create_engine, text, Column, Float, Integer, String from",
"Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) factors",
"= 'item' id = Column(Integer, primary_key=True) title = Column(String) factors = Column(Vector(20)) bias",
"= fetch_movielens(min_rating=5.0) model = LightFM(loss='warp', no_components=20) model.fit(data['train'], epochs=30) user_biases, user_factors = model.get_user_representations() item_biases,",
"bias=item_biases[i].item()) for i, factors in enumerate(item_factors)] session = Session(engine) session.bulk_insert_mappings(User, users) session.bulk_insert_mappings(Item, items)",
"engine = create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True) with engine.connect() as conn: conn.execute(text('CREATE EXTENSION IF NOT EXISTS",
"Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) title = Column(String) factors =",
"for negative inner product items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all() print('user-based recs:', [item.title for",
"model.get_user_representations() item_biases, item_factors = model.get_item_representations() users = [dict(id=i, factors=factors) for i, factors in",
"for i, factors in enumerate(item_factors)] session = Session(engine) session.bulk_insert_mappings(User, users) session.bulk_insert_mappings(Item, items) session.commit()",
"item_factors = model.get_item_representations() users = [dict(id=i, factors=factors) for i, factors in enumerate(user_factors)] items",
"from sqlalchemy import create_engine, text, Column, Float, Integer, String from sqlalchemy.orm import declarative_base,",
"lightfm import LightFM from lightfm.datasets import fetch_movielens from pgvector.sqlalchemy import Vector from sqlalchemy",
"fetch_movielens from pgvector.sqlalchemy import Vector from sqlalchemy import create_engine, text, Column, Float, Integer,",
"factors in enumerate(item_factors)] session = Session(engine) session.bulk_insert_mappings(User, users) session.bulk_insert_mappings(Item, items) session.commit() user =",
"Column(Integer, primary_key=True) title = Column(String) factors = Column(Vector(20)) bias = Column(Float) Base.metadata.drop_all(engine) Base.metadata.create_all(engine)",
"for item in items]) item = session.query(Item).filter(Item.title == 'Star Wars (1977)').first() items =",
"session = Session(engine) session.bulk_insert_mappings(User, users) session.bulk_insert_mappings(Item, items) session.commit() user = session.query(User).get(1) # subtract",
"model.get_item_representations() users = [dict(id=i, factors=factors) for i, factors in enumerate(user_factors)] items = [dict(id=i,",
"'item' id = Column(Integer, primary_key=True) title = Column(String) factors = Column(Vector(20)) bias =",
"session.query(Item).filter(Item.title == 'Star Wars (1977)').first() items = session.query(Item).filter(Item.id != item.id).order_by(Item.factors.cosine_distance(item.factors)).limit(5).all() print('item-based recs:', [item.title",
"negative inner product items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all() print('user-based recs:', [item.title for item",
"'user' id = Column(Integer, primary_key=True) factors = Column(Vector(20)) class Item(Base): __tablename__ = 'item'",
"= declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) factors =",
"sqlalchemy import create_engine, text, Column, Float, Integer, String from sqlalchemy.orm import declarative_base, Session",
"as conn: conn.execute(text('CREATE EXTENSION IF NOT EXISTS vector')) conn.commit() Base = declarative_base() class",
"Vector from sqlalchemy import create_engine, text, Column, Float, Integer, String from sqlalchemy.orm import",
"== 'Star Wars (1977)').first() items = session.query(Item).filter(Item.id != item.id).order_by(Item.factors.cosine_distance(item.factors)).limit(5).all() print('item-based recs:', [item.title for",
"fetch_movielens(min_rating=5.0) model = LightFM(loss='warp', no_components=20) model.fit(data['train'], epochs=30) user_biases, user_factors = model.get_user_representations() item_biases, item_factors",
"= Column(String) factors = Column(Vector(20)) bias = Column(Float) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) data = fetch_movielens(min_rating=5.0)",
"items = [dict(id=i, title=data['item_labels'][i], factors=factors, bias=item_biases[i].item()) for i, factors in enumerate(item_factors)] session =",
"User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) factors = Column(Vector(20)) class Item(Base):",
"model = LightFM(loss='warp', no_components=20) model.fit(data['train'], epochs=30) user_biases, user_factors = model.get_user_representations() item_biases, item_factors =",
"Integer, String from sqlalchemy.orm import declarative_base, Session engine = create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True) with engine.connect()",
"= session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all() print('user-based recs:', [item.title for item in items]) item =",
"product items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors) - Item.bias).limit(5).all() print('user-based recs:', [item.title for item in items])",
"engine.connect() as conn: conn.execute(text('CREATE EXTENSION IF NOT EXISTS vector')) conn.commit() Base = declarative_base()",
"Column, Float, Integer, String from sqlalchemy.orm import declarative_base, Session engine = create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True)",
"items]) item = session.query(Item).filter(Item.title == 'Star Wars (1977)').first() items = session.query(Item).filter(Item.id != item.id).order_by(Item.factors.cosine_distance(item.factors)).limit(5).all()",
"import LightFM from lightfm.datasets import fetch_movielens from pgvector.sqlalchemy import Vector from sqlalchemy import",
"[dict(id=i, title=data['item_labels'][i], factors=factors, bias=item_biases[i].item()) for i, factors in enumerate(item_factors)] session = Session(engine) session.bulk_insert_mappings(User,",
"factors = Column(Vector(20)) bias = Column(Float) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) data = fetch_movielens(min_rating=5.0) model =",
"data = fetch_movielens(min_rating=5.0) model = LightFM(loss='warp', no_components=20) model.fit(data['train'], epochs=30) user_biases, user_factors = model.get_user_representations()",
"item = session.query(Item).filter(Item.title == 'Star Wars (1977)').first() items = session.query(Item).filter(Item.id != item.id).order_by(Item.factors.cosine_distance(item.factors)).limit(5).all() print('item-based",
"= Column(Integer, primary_key=True) factors = Column(Vector(20)) class Item(Base): __tablename__ = 'item' id =",
"Column(Vector(20)) bias = Column(Float) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) data = fetch_movielens(min_rating=5.0) model = LightFM(loss='warp', no_components=20)",
"= model.get_user_representations() item_biases, item_factors = model.get_item_representations() users = [dict(id=i, factors=factors) for i, factors",
"item in items]) item = session.query(Item).filter(Item.title == 'Star Wars (1977)').first() items = session.query(Item).filter(Item.id",
"i, factors in enumerate(user_factors)] items = [dict(id=i, title=data['item_labels'][i], factors=factors, bias=item_biases[i].item()) for i, factors",
"from lightfm.datasets import fetch_movielens from pgvector.sqlalchemy import Vector from sqlalchemy import create_engine, text,",
"= session.query(User).get(1) # subtract item bias for negative inner product items = session.query(Item).order_by(Item.factors.max_inner_product(user.factors)",
"String from sqlalchemy.orm import declarative_base, Session engine = create_engine('postgresql+psycopg2://localhost/pgvector_example', future=True) with engine.connect() as"
] |
[
"object\"\"\" import os import pickle import sys import CommonEnvironment from CommonEnvironment.Interface import staticderived,",
"# | Public Properties Name = DerivedProperty(\"Pickle\") Description = DerivedProperty(\"Pickles each element to",
"return True # ---------------------------------------------------------------------- @staticmethod @override def GenerateCustomSettingsAndDefaults(): return [] # ---------------------------------------------------------------------- @staticmethod",
"with status_stream.DoneManager() as status_dm: generator_path = os.path.dirname(simple_schema_generator.OriginalModuleFilename) assert os.path.isdir(generator_path), generator_path generator_path = os.path.dirname(generator_path)",
"# ---------------------------------------------------------------------- # | Public Properties Name = DerivedProperty(\"Pickle\") Description = DerivedProperty(\"Pickles each",
"Public Properties Name = DerivedProperty(\"Pickle\") Description = DerivedProperty(\"Pickles each element to a file\")",
"output_filenames, name, elements, include_indexes, status_stream, verbose_stream, verbose, **custom_settings ): assert len(output_filenames) == 2,",
"to a file\") Flags = DerivedProperty(ParseFlag.AllFlags) # ---------------------------------------------------------------------- # | Public Methods @staticmethod",
"== 2, output_filenames # Pickle status_stream.write(\"Creating '{}'...\".format(output_filenames[0])) with status_stream.DoneManager() as status_dm: with open(output_filenames[0],",
"= DerivedProperty(ParseFlag.AllFlags) # ---------------------------------------------------------------------- # | Public Methods @staticmethod @override def IsValidEnvironment(): return",
"return [] # ---------------------------------------------------------------------- @staticmethod @override def GenerateOutputFilenames(context): # Return a single item",
"# | <NAME> <<EMAIL>> # | 2020-07-24 15:08:32 # | # ---------------------------------------------------------------------- #",
"@staticmethod @override def IsValidEnvironment(): return True # ---------------------------------------------------------------------- @staticmethod @override def GenerateCustomSettingsAndDefaults(): return",
"CommonEnvironment.Interface import staticderived, override, DerivedProperty from CommonEnvironmentEx.Package import InitRelativeImports # ---------------------------------------------------------------------- _script_fullpath =",
"each element to a file\") Flags = DerivedProperty(ParseFlag.AllFlags) # ---------------------------------------------------------------------- # | Public",
"# | # ---------------------------------------------------------------------- \"\"\"Contains the Plugin object\"\"\" import os import pickle import",
"---------------------------------------------------------------------- @staticmethod @override def GenerateOutputFilenames(context): # Return a single item (that will never",
"output_filenames # Pickle status_stream.write(\"Creating '{}'...\".format(output_filenames[0])) with status_stream.DoneManager() as status_dm: with open(output_filenames[0], \"wb\") as",
"f) # Path status_stream.write(\"Creating '{}'...\".format(output_filenames[1])) with status_stream.DoneManager() as status_dm: generator_path = os.path.dirname(simple_schema_generator.OriginalModuleFilename) assert",
"Pickle status_stream.write(\"Creating '{}'...\".format(output_filenames[0])) with status_stream.DoneManager() as status_dm: with open(output_filenames[0], \"wb\") as f: pickle.dump(elements,",
"an empty lists # aren't supported. return [\"{}.{}\".format(context[\"output_name\"], ext) for ext in [\"pickle\",",
"---------------------------------------------------------------------- # | # | Copyright <NAME> 2020-21 # | Distributed under the",
"the Plugin object\"\"\" import os import pickle import sys import CommonEnvironment from CommonEnvironment.Interface",
"= os.path.split(_script_fullpath) # ---------------------------------------------------------------------- with InitRelativeImports(): from ..Plugin import Plugin as PluginBase, ParseFlag,",
"CommonEnvironment from CommonEnvironment.Interface import staticderived, override, DerivedProperty from CommonEnvironmentEx.Package import InitRelativeImports # ----------------------------------------------------------------------",
"in [\"pickle\", \"path\"]] # ---------------------------------------------------------------------- @staticmethod @override def Generate( simple_schema_generator, invoke_reason, input_filenames, output_filenames,",
"import InitRelativeImports # ---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath() _script_dir, _script_name = os.path.split(_script_fullpath) # ----------------------------------------------------------------------",
"# Pickle status_stream.write(\"Creating '{}'...\".format(output_filenames[0])) with status_stream.DoneManager() as status_dm: with open(output_filenames[0], \"wb\") as f:",
"status_stream, verbose_stream, verbose, **custom_settings ): assert len(output_filenames) == 2, output_filenames # Pickle status_stream.write(\"Creating",
"GenerateCustomSettingsAndDefaults(): return [] # ---------------------------------------------------------------------- @staticmethod @override def GenerateOutputFilenames(context): # Return a single",
"PicklePlugin.py # | # | <NAME> <<EMAIL>> # | 2020-07-24 15:08:32 # |",
"CommonEnvironment.ThisFullpath() _script_dir, _script_name = os.path.split(_script_fullpath) # ---------------------------------------------------------------------- with InitRelativeImports(): from ..Plugin import Plugin",
"2020-21 # | Distributed under the Boost Software License, Version 1.0. See #",
"as status_dm: with open(output_filenames[0], \"wb\") as f: pickle.dump(elements, f) # Path status_stream.write(\"Creating '{}'...\".format(output_filenames[1]))",
"..Plugin import Plugin as PluginBase, ParseFlag, Extension # ---------------------------------------------------------------------- @staticderived class Plugin(PluginBase): #",
"= CommonEnvironment.ThisFullpath() _script_dir, _script_name = os.path.split(_script_fullpath) # ---------------------------------------------------------------------- with InitRelativeImports(): from ..Plugin import",
"for ext in [\"pickle\", \"path\"]] # ---------------------------------------------------------------------- @staticmethod @override def Generate( simple_schema_generator, invoke_reason,",
"the Boost Software License, Version 1.0. See # | accompanying file LICENSE_1_0.txt or",
"): assert len(output_filenames) == 2, output_filenames # Pickle status_stream.write(\"Creating '{}'...\".format(output_filenames[0])) with status_stream.DoneManager() as",
"---------------------------------------------------------------------- \"\"\"Contains the Plugin object\"\"\" import os import pickle import sys import CommonEnvironment",
"# ---------------------------------------------------------------------- @staticderived class Plugin(PluginBase): # ---------------------------------------------------------------------- # | Public Properties Name =",
"\"\"\"Contains the Plugin object\"\"\" import os import pickle import sys import CommonEnvironment from",
"# ---------------------------------------------------------------------- with InitRelativeImports(): from ..Plugin import Plugin as PluginBase, ParseFlag, Extension #",
"status_stream.write(\"Creating '{}'...\".format(output_filenames[0])) with status_stream.DoneManager() as status_dm: with open(output_filenames[0], \"wb\") as f: pickle.dump(elements, f)",
"Return a single item (that will never be used), as an empty lists",
"GenerateOutputFilenames(context): # Return a single item (that will never be used), as an",
"with open(output_filenames[0], \"wb\") as f: pickle.dump(elements, f) # Path status_stream.write(\"Creating '{}'...\".format(output_filenames[1])) with status_stream.DoneManager()",
"# | PicklePlugin.py # | # | <NAME> <<EMAIL>> # | 2020-07-24 15:08:32",
"import os import pickle import sys import CommonEnvironment from CommonEnvironment.Interface import staticderived, override,",
"IsValidEnvironment(): return True # ---------------------------------------------------------------------- @staticmethod @override def GenerateCustomSettingsAndDefaults(): return [] # ----------------------------------------------------------------------",
"DerivedProperty(ParseFlag.AllFlags) # ---------------------------------------------------------------------- # | Public Methods @staticmethod @override def IsValidEnvironment(): return True",
"verbose_stream, verbose, **custom_settings ): assert len(output_filenames) == 2, output_filenames # Pickle status_stream.write(\"Creating '{}'...\".format(output_filenames[0]))",
"status_stream.DoneManager() as status_dm: with open(output_filenames[0], \"wb\") as f: pickle.dump(elements, f) # Path status_stream.write(\"Creating",
"Path status_stream.write(\"Creating '{}'...\".format(output_filenames[1])) with status_stream.DoneManager() as status_dm: generator_path = os.path.dirname(simple_schema_generator.OriginalModuleFilename) assert os.path.isdir(generator_path), generator_path",
"from CommonEnvironment.Interface import staticderived, override, DerivedProperty from CommonEnvironmentEx.Package import InitRelativeImports # ---------------------------------------------------------------------- _script_fullpath",
"# Path status_stream.write(\"Creating '{}'...\".format(output_filenames[1])) with status_stream.DoneManager() as status_dm: generator_path = os.path.dirname(simple_schema_generator.OriginalModuleFilename) assert os.path.isdir(generator_path),",
"# | http://www.boost.org/LICENSE_1_0.txt. # | # ---------------------------------------------------------------------- \"\"\"Contains the Plugin object\"\"\" import os",
"def IsValidEnvironment(): return True # ---------------------------------------------------------------------- @staticmethod @override def GenerateCustomSettingsAndDefaults(): return [] #",
"15:08:32 # | # ---------------------------------------------------------------------- # | # | Copyright <NAME> 2020-21 #",
"as status_dm: generator_path = os.path.dirname(simple_schema_generator.OriginalModuleFilename) assert os.path.isdir(generator_path), generator_path generator_path = os.path.dirname(generator_path) with open(output_filenames[1],",
"return [\"{}.{}\".format(context[\"output_name\"], ext) for ext in [\"pickle\", \"path\"]] # ---------------------------------------------------------------------- @staticmethod @override def",
"elements, include_indexes, status_stream, verbose_stream, verbose, **custom_settings ): assert len(output_filenames) == 2, output_filenames #",
"assert len(output_filenames) == 2, output_filenames # Pickle status_stream.write(\"Creating '{}'...\".format(output_filenames[0])) with status_stream.DoneManager() as status_dm:",
"'{}'...\".format(output_filenames[0])) with status_stream.DoneManager() as status_dm: with open(output_filenames[0], \"wb\") as f: pickle.dump(elements, f) #",
"os import pickle import sys import CommonEnvironment from CommonEnvironment.Interface import staticderived, override, DerivedProperty",
"as PluginBase, ParseFlag, Extension # ---------------------------------------------------------------------- @staticderived class Plugin(PluginBase): # ---------------------------------------------------------------------- # |",
"from ..Plugin import Plugin as PluginBase, ParseFlag, Extension # ---------------------------------------------------------------------- @staticderived class Plugin(PluginBase):",
"InitRelativeImports(): from ..Plugin import Plugin as PluginBase, ParseFlag, Extension # ---------------------------------------------------------------------- @staticderived class",
"a single item (that will never be used), as an empty lists #",
"License, Version 1.0. See # | accompanying file LICENSE_1_0.txt or copy at #",
"import staticderived, override, DerivedProperty from CommonEnvironmentEx.Package import InitRelativeImports # ---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath()",
"used), as an empty lists # aren't supported. return [\"{}.{}\".format(context[\"output_name\"], ext) for ext",
"f: pickle.dump(elements, f) # Path status_stream.write(\"Creating '{}'...\".format(output_filenames[1])) with status_stream.DoneManager() as status_dm: generator_path =",
"status_stream.write(\"Creating '{}'...\".format(output_filenames[1])) with status_stream.DoneManager() as status_dm: generator_path = os.path.dirname(simple_schema_generator.OriginalModuleFilename) assert os.path.isdir(generator_path), generator_path generator_path",
"Copyright <NAME> 2020-21 # | Distributed under the Boost Software License, Version 1.0.",
"verbose, **custom_settings ): assert len(output_filenames) == 2, output_filenames # Pickle status_stream.write(\"Creating '{}'...\".format(output_filenames[0])) with",
"override, DerivedProperty from CommonEnvironmentEx.Package import InitRelativeImports # ---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath() _script_dir, _script_name",
"empty lists # aren't supported. return [\"{}.{}\".format(context[\"output_name\"], ext) for ext in [\"pickle\", \"path\"]]",
"<filename>src/SimpleSchemaGenerator/Plugins/PicklePlugin.py # ---------------------------------------------------------------------- # | # | PicklePlugin.py # | # | <NAME>",
"_script_dir, _script_name = os.path.split(_script_fullpath) # ---------------------------------------------------------------------- with InitRelativeImports(): from ..Plugin import Plugin as",
"Description = DerivedProperty(\"Pickles each element to a file\") Flags = DerivedProperty(ParseFlag.AllFlags) # ----------------------------------------------------------------------",
"never be used), as an empty lists # aren't supported. return [\"{}.{}\".format(context[\"output_name\"], ext)",
"\"wb\") as f: pickle.dump(elements, f) # Path status_stream.write(\"Creating '{}'...\".format(output_filenames[1])) with status_stream.DoneManager() as status_dm:",
"| Public Properties Name = DerivedProperty(\"Pickle\") Description = DerivedProperty(\"Pickles each element to a",
"---------------------------------------------------------------------- # | Public Methods @staticmethod @override def IsValidEnvironment(): return True # ----------------------------------------------------------------------",
"# | # | <NAME> <<EMAIL>> # | 2020-07-24 15:08:32 # | #",
"import sys import CommonEnvironment from CommonEnvironment.Interface import staticderived, override, DerivedProperty from CommonEnvironmentEx.Package import",
"include_indexes, status_stream, verbose_stream, verbose, **custom_settings ): assert len(output_filenames) == 2, output_filenames # Pickle",
"# | # ---------------------------------------------------------------------- # | # | Copyright <NAME> 2020-21 # |",
"1.0. See # | accompanying file LICENSE_1_0.txt or copy at # | http://www.boost.org/LICENSE_1_0.txt.",
"| <NAME> <<EMAIL>> # | 2020-07-24 15:08:32 # | # ---------------------------------------------------------------------- # |",
"item (that will never be used), as an empty lists # aren't supported.",
"---------------------------------------------------------------------- with InitRelativeImports(): from ..Plugin import Plugin as PluginBase, ParseFlag, Extension # ----------------------------------------------------------------------",
"as f: pickle.dump(elements, f) # Path status_stream.write(\"Creating '{}'...\".format(output_filenames[1])) with status_stream.DoneManager() as status_dm: generator_path",
"| Public Methods @staticmethod @override def IsValidEnvironment(): return True # ---------------------------------------------------------------------- @staticmethod @override",
"be used), as an empty lists # aren't supported. return [\"{}.{}\".format(context[\"output_name\"], ext) for",
"# ---------------------------------------------------------------------- \"\"\"Contains the Plugin object\"\"\" import os import pickle import sys import",
"will never be used), as an empty lists # aren't supported. return [\"{}.{}\".format(context[\"output_name\"],",
"file LICENSE_1_0.txt or copy at # | http://www.boost.org/LICENSE_1_0.txt. # | # ---------------------------------------------------------------------- \"\"\"Contains",
"Plugin as PluginBase, ParseFlag, Extension # ---------------------------------------------------------------------- @staticderived class Plugin(PluginBase): # ---------------------------------------------------------------------- #",
"# Return a single item (that will never be used), as an empty",
"copy at # | http://www.boost.org/LICENSE_1_0.txt. # | # ---------------------------------------------------------------------- \"\"\"Contains the Plugin object\"\"\"",
"@override def GenerateCustomSettingsAndDefaults(): return [] # ---------------------------------------------------------------------- @staticmethod @override def GenerateOutputFilenames(context): # Return",
"lists # aren't supported. return [\"{}.{}\".format(context[\"output_name\"], ext) for ext in [\"pickle\", \"path\"]] #",
"| accompanying file LICENSE_1_0.txt or copy at # | http://www.boost.org/LICENSE_1_0.txt. # | #",
"simple_schema_generator, invoke_reason, input_filenames, output_filenames, name, elements, include_indexes, status_stream, verbose_stream, verbose, **custom_settings ): assert",
"---------------------------------------------------------------------- @staticmethod @override def Generate( simple_schema_generator, invoke_reason, input_filenames, output_filenames, name, elements, include_indexes, status_stream,",
"# | # | Copyright <NAME> 2020-21 # | Distributed under the Boost",
"ext) for ext in [\"pickle\", \"path\"]] # ---------------------------------------------------------------------- @staticmethod @override def Generate( simple_schema_generator,",
"CommonEnvironmentEx.Package import InitRelativeImports # ---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath() _script_dir, _script_name = os.path.split(_script_fullpath) #",
"InitRelativeImports # ---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath() _script_dir, _script_name = os.path.split(_script_fullpath) # ---------------------------------------------------------------------- with",
"_script_name = os.path.split(_script_fullpath) # ---------------------------------------------------------------------- with InitRelativeImports(): from ..Plugin import Plugin as PluginBase,",
"ext in [\"pickle\", \"path\"]] # ---------------------------------------------------------------------- @staticmethod @override def Generate( simple_schema_generator, invoke_reason, input_filenames,",
"as an empty lists # aren't supported. return [\"{}.{}\".format(context[\"output_name\"], ext) for ext in",
"| Copyright <NAME> 2020-21 # | Distributed under the Boost Software License, Version",
"<NAME> <<EMAIL>> # | 2020-07-24 15:08:32 # | # ---------------------------------------------------------------------- # | #",
"_script_fullpath = CommonEnvironment.ThisFullpath() _script_dir, _script_name = os.path.split(_script_fullpath) # ---------------------------------------------------------------------- with InitRelativeImports(): from ..Plugin",
"| 2020-07-24 15:08:32 # | # ---------------------------------------------------------------------- # | # | Copyright <NAME>",
"# | accompanying file LICENSE_1_0.txt or copy at # | http://www.boost.org/LICENSE_1_0.txt. # |",
"| # | <NAME> <<EMAIL>> # | 2020-07-24 15:08:32 # | # ----------------------------------------------------------------------",
"# | # | PicklePlugin.py # | # | <NAME> <<EMAIL>> # |",
"---------------------------------------------------------------------- # | Public Properties Name = DerivedProperty(\"Pickle\") Description = DerivedProperty(\"Pickles each element",
"# | Copyright <NAME> 2020-21 # | Distributed under the Boost Software License,",
"Plugin object\"\"\" import os import pickle import sys import CommonEnvironment from CommonEnvironment.Interface import",
"element to a file\") Flags = DerivedProperty(ParseFlag.AllFlags) # ---------------------------------------------------------------------- # | Public Methods",
"[] # ---------------------------------------------------------------------- @staticmethod @override def GenerateOutputFilenames(context): # Return a single item (that",
"status_dm: generator_path = os.path.dirname(simple_schema_generator.OriginalModuleFilename) assert os.path.isdir(generator_path), generator_path generator_path = os.path.dirname(generator_path) with open(output_filenames[1], \"w\")",
"at # | http://www.boost.org/LICENSE_1_0.txt. # | # ---------------------------------------------------------------------- \"\"\"Contains the Plugin object\"\"\" import",
"os.path.dirname(simple_schema_generator.OriginalModuleFilename) assert os.path.isdir(generator_path), generator_path generator_path = os.path.dirname(generator_path) with open(output_filenames[1], \"w\") as f: f.write(generator_path)",
"Methods @staticmethod @override def IsValidEnvironment(): return True # ---------------------------------------------------------------------- @staticmethod @override def GenerateCustomSettingsAndDefaults():",
"# ---------------------------------------------------------------------- @staticmethod @override def GenerateCustomSettingsAndDefaults(): return [] # ---------------------------------------------------------------------- @staticmethod @override def",
"# ---------------------------------------------------------------------- # | Public Methods @staticmethod @override def IsValidEnvironment(): return True #",
"\"path\"]] # ---------------------------------------------------------------------- @staticmethod @override def Generate( simple_schema_generator, invoke_reason, input_filenames, output_filenames, name, elements,",
"open(output_filenames[0], \"wb\") as f: pickle.dump(elements, f) # Path status_stream.write(\"Creating '{}'...\".format(output_filenames[1])) with status_stream.DoneManager() as",
"Boost Software License, Version 1.0. See # | accompanying file LICENSE_1_0.txt or copy",
"aren't supported. return [\"{}.{}\".format(context[\"output_name\"], ext) for ext in [\"pickle\", \"path\"]] # ---------------------------------------------------------------------- @staticmethod",
"name, elements, include_indexes, status_stream, verbose_stream, verbose, **custom_settings ): assert len(output_filenames) == 2, output_filenames",
"Plugin(PluginBase): # ---------------------------------------------------------------------- # | Public Properties Name = DerivedProperty(\"Pickle\") Description = DerivedProperty(\"Pickles",
"---------------------------------------------------------------------- @staticmethod @override def GenerateCustomSettingsAndDefaults(): return [] # ---------------------------------------------------------------------- @staticmethod @override def GenerateOutputFilenames(context):",
"@override def GenerateOutputFilenames(context): # Return a single item (that will never be used),",
"import pickle import sys import CommonEnvironment from CommonEnvironment.Interface import staticderived, override, DerivedProperty from",
"os.path.split(_script_fullpath) # ---------------------------------------------------------------------- with InitRelativeImports(): from ..Plugin import Plugin as PluginBase, ParseFlag, Extension",
"---------------------------------------------------------------------- @staticderived class Plugin(PluginBase): # ---------------------------------------------------------------------- # | Public Properties Name = DerivedProperty(\"Pickle\")",
"or copy at # | http://www.boost.org/LICENSE_1_0.txt. # | # ---------------------------------------------------------------------- \"\"\"Contains the Plugin",
"http://www.boost.org/LICENSE_1_0.txt. # | # ---------------------------------------------------------------------- \"\"\"Contains the Plugin object\"\"\" import os import pickle",
"| PicklePlugin.py # | # | <NAME> <<EMAIL>> # | 2020-07-24 15:08:32 #",
"2020-07-24 15:08:32 # | # ---------------------------------------------------------------------- # | # | Copyright <NAME> 2020-21",
"@staticderived class Plugin(PluginBase): # ---------------------------------------------------------------------- # | Public Properties Name = DerivedProperty(\"Pickle\") Description",
"= DerivedProperty(\"Pickles each element to a file\") Flags = DerivedProperty(ParseFlag.AllFlags) # ---------------------------------------------------------------------- #",
"@override def IsValidEnvironment(): return True # ---------------------------------------------------------------------- @staticmethod @override def GenerateCustomSettingsAndDefaults(): return []",
"generator_path = os.path.dirname(simple_schema_generator.OriginalModuleFilename) assert os.path.isdir(generator_path), generator_path generator_path = os.path.dirname(generator_path) with open(output_filenames[1], \"w\") as",
"len(output_filenames) == 2, output_filenames # Pickle status_stream.write(\"Creating '{}'...\".format(output_filenames[0])) with status_stream.DoneManager() as status_dm: with",
"pickle import sys import CommonEnvironment from CommonEnvironment.Interface import staticderived, override, DerivedProperty from CommonEnvironmentEx.Package",
"LICENSE_1_0.txt or copy at # | http://www.boost.org/LICENSE_1_0.txt. # | # ---------------------------------------------------------------------- \"\"\"Contains the",
"True # ---------------------------------------------------------------------- @staticmethod @override def GenerateCustomSettingsAndDefaults(): return [] # ---------------------------------------------------------------------- @staticmethod @override",
"import CommonEnvironment from CommonEnvironment.Interface import staticderived, override, DerivedProperty from CommonEnvironmentEx.Package import InitRelativeImports #",
"# aren't supported. return [\"{}.{}\".format(context[\"output_name\"], ext) for ext in [\"pickle\", \"path\"]] # ----------------------------------------------------------------------",
"= os.path.dirname(simple_schema_generator.OriginalModuleFilename) assert os.path.isdir(generator_path), generator_path generator_path = os.path.dirname(generator_path) with open(output_filenames[1], \"w\") as f:",
"Extension # ---------------------------------------------------------------------- @staticderived class Plugin(PluginBase): # ---------------------------------------------------------------------- # | Public Properties Name",
"ParseFlag, Extension # ---------------------------------------------------------------------- @staticderived class Plugin(PluginBase): # ---------------------------------------------------------------------- # | Public Properties",
"sys import CommonEnvironment from CommonEnvironment.Interface import staticderived, override, DerivedProperty from CommonEnvironmentEx.Package import InitRelativeImports",
"under the Boost Software License, Version 1.0. See # | accompanying file LICENSE_1_0.txt",
"<NAME> 2020-21 # | Distributed under the Boost Software License, Version 1.0. See",
"[\"pickle\", \"path\"]] # ---------------------------------------------------------------------- @staticmethod @override def Generate( simple_schema_generator, invoke_reason, input_filenames, output_filenames, name,",
"with InitRelativeImports(): from ..Plugin import Plugin as PluginBase, ParseFlag, Extension # ---------------------------------------------------------------------- @staticderived",
"@staticmethod @override def Generate( simple_schema_generator, invoke_reason, input_filenames, output_filenames, name, elements, include_indexes, status_stream, verbose_stream,",
"Software License, Version 1.0. See # | accompanying file LICENSE_1_0.txt or copy at",
"Properties Name = DerivedProperty(\"Pickle\") Description = DerivedProperty(\"Pickles each element to a file\") Flags",
"2, output_filenames # Pickle status_stream.write(\"Creating '{}'...\".format(output_filenames[0])) with status_stream.DoneManager() as status_dm: with open(output_filenames[0], \"wb\")",
"# ---------------------------------------------------------------------- @staticmethod @override def Generate( simple_schema_generator, invoke_reason, input_filenames, output_filenames, name, elements, include_indexes,",
"@staticmethod @override def GenerateCustomSettingsAndDefaults(): return [] # ---------------------------------------------------------------------- @staticmethod @override def GenerateOutputFilenames(context): #",
"[\"{}.{}\".format(context[\"output_name\"], ext) for ext in [\"pickle\", \"path\"]] # ---------------------------------------------------------------------- @staticmethod @override def Generate(",
"staticderived, override, DerivedProperty from CommonEnvironmentEx.Package import InitRelativeImports # ---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath() _script_dir,",
"'{}'...\".format(output_filenames[1])) with status_stream.DoneManager() as status_dm: generator_path = os.path.dirname(simple_schema_generator.OriginalModuleFilename) assert os.path.isdir(generator_path), generator_path generator_path =",
"# ---------------------------------------------------------------------- @staticmethod @override def GenerateOutputFilenames(context): # Return a single item (that will",
"| http://www.boost.org/LICENSE_1_0.txt. # | # ---------------------------------------------------------------------- \"\"\"Contains the Plugin object\"\"\" import os import",
"input_filenames, output_filenames, name, elements, include_indexes, status_stream, verbose_stream, verbose, **custom_settings ): assert len(output_filenames) ==",
"| # | PicklePlugin.py # | # | <NAME> <<EMAIL>> # | 2020-07-24",
"---------------------------------------------------------------------- # | # | PicklePlugin.py # | # | <NAME> <<EMAIL>> #",
"Generate( simple_schema_generator, invoke_reason, input_filenames, output_filenames, name, elements, include_indexes, status_stream, verbose_stream, verbose, **custom_settings ):",
"# | Public Methods @staticmethod @override def IsValidEnvironment(): return True # ---------------------------------------------------------------------- @staticmethod",
"# ---------------------------------------------------------------------- # | # | Copyright <NAME> 2020-21 # | Distributed under",
"# | Distributed under the Boost Software License, Version 1.0. See # |",
"(that will never be used), as an empty lists # aren't supported. return",
"Public Methods @staticmethod @override def IsValidEnvironment(): return True # ---------------------------------------------------------------------- @staticmethod @override def",
"with status_stream.DoneManager() as status_dm: with open(output_filenames[0], \"wb\") as f: pickle.dump(elements, f) # Path",
"# ---------------------------------------------------------------------- # | # | PicklePlugin.py # | # | <NAME> <<EMAIL>>",
"| # | Copyright <NAME> 2020-21 # | Distributed under the Boost Software",
"Version 1.0. See # | accompanying file LICENSE_1_0.txt or copy at # |",
"See # | accompanying file LICENSE_1_0.txt or copy at # | http://www.boost.org/LICENSE_1_0.txt. #",
"def GenerateCustomSettingsAndDefaults(): return [] # ---------------------------------------------------------------------- @staticmethod @override def GenerateOutputFilenames(context): # Return a",
"<<EMAIL>> # | 2020-07-24 15:08:32 # | # ---------------------------------------------------------------------- # | # |",
"# | 2020-07-24 15:08:32 # | # ---------------------------------------------------------------------- # | # | Copyright",
"Flags = DerivedProperty(ParseFlag.AllFlags) # ---------------------------------------------------------------------- # | Public Methods @staticmethod @override def IsValidEnvironment():",
"Distributed under the Boost Software License, Version 1.0. See # | accompanying file",
"import Plugin as PluginBase, ParseFlag, Extension # ---------------------------------------------------------------------- @staticderived class Plugin(PluginBase): # ----------------------------------------------------------------------",
"accompanying file LICENSE_1_0.txt or copy at # | http://www.boost.org/LICENSE_1_0.txt. # | # ----------------------------------------------------------------------",
"single item (that will never be used), as an empty lists # aren't",
"from CommonEnvironmentEx.Package import InitRelativeImports # ---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath() _script_dir, _script_name = os.path.split(_script_fullpath)",
"| Distributed under the Boost Software License, Version 1.0. See # | accompanying",
"def Generate( simple_schema_generator, invoke_reason, input_filenames, output_filenames, name, elements, include_indexes, status_stream, verbose_stream, verbose, **custom_settings",
"---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath() _script_dir, _script_name = os.path.split(_script_fullpath) # ---------------------------------------------------------------------- with InitRelativeImports(): from",
"supported. return [\"{}.{}\".format(context[\"output_name\"], ext) for ext in [\"pickle\", \"path\"]] # ---------------------------------------------------------------------- @staticmethod @override",
"DerivedProperty(\"Pickle\") Description = DerivedProperty(\"Pickles each element to a file\") Flags = DerivedProperty(ParseFlag.AllFlags) #",
"file\") Flags = DerivedProperty(ParseFlag.AllFlags) # ---------------------------------------------------------------------- # | Public Methods @staticmethod @override def",
"PluginBase, ParseFlag, Extension # ---------------------------------------------------------------------- @staticderived class Plugin(PluginBase): # ---------------------------------------------------------------------- # | Public",
"class Plugin(PluginBase): # ---------------------------------------------------------------------- # | Public Properties Name = DerivedProperty(\"Pickle\") Description =",
"# ---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath() _script_dir, _script_name = os.path.split(_script_fullpath) # ---------------------------------------------------------------------- with InitRelativeImports():",
"| # ---------------------------------------------------------------------- # | # | Copyright <NAME> 2020-21 # | Distributed",
"invoke_reason, input_filenames, output_filenames, name, elements, include_indexes, status_stream, verbose_stream, verbose, **custom_settings ): assert len(output_filenames)",
"DerivedProperty(\"Pickles each element to a file\") Flags = DerivedProperty(ParseFlag.AllFlags) # ---------------------------------------------------------------------- # |",
"pickle.dump(elements, f) # Path status_stream.write(\"Creating '{}'...\".format(output_filenames[1])) with status_stream.DoneManager() as status_dm: generator_path = os.path.dirname(simple_schema_generator.OriginalModuleFilename)",
"= DerivedProperty(\"Pickle\") Description = DerivedProperty(\"Pickles each element to a file\") Flags = DerivedProperty(ParseFlag.AllFlags)",
"DerivedProperty from CommonEnvironmentEx.Package import InitRelativeImports # ---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath() _script_dir, _script_name =",
"status_dm: with open(output_filenames[0], \"wb\") as f: pickle.dump(elements, f) # Path status_stream.write(\"Creating '{}'...\".format(output_filenames[1])) with",
"| # ---------------------------------------------------------------------- \"\"\"Contains the Plugin object\"\"\" import os import pickle import sys",
"@staticmethod @override def GenerateOutputFilenames(context): # Return a single item (that will never be",
"Name = DerivedProperty(\"Pickle\") Description = DerivedProperty(\"Pickles each element to a file\") Flags =",
"def GenerateOutputFilenames(context): # Return a single item (that will never be used), as",
"**custom_settings ): assert len(output_filenames) == 2, output_filenames # Pickle status_stream.write(\"Creating '{}'...\".format(output_filenames[0])) with status_stream.DoneManager()",
"a file\") Flags = DerivedProperty(ParseFlag.AllFlags) # ---------------------------------------------------------------------- # | Public Methods @staticmethod @override",
"status_stream.DoneManager() as status_dm: generator_path = os.path.dirname(simple_schema_generator.OriginalModuleFilename) assert os.path.isdir(generator_path), generator_path generator_path = os.path.dirname(generator_path) with",
"@override def Generate( simple_schema_generator, invoke_reason, input_filenames, output_filenames, name, elements, include_indexes, status_stream, verbose_stream, verbose,"
] |
[
"time.perf_counter() print(\"@timefn: {} took {} seconds\".format(fn.__name__, t2 - t1)) return result return measure_time",
"import time from functools import wraps def timefn(fn): @wraps(fn) def measure_time(*args, **kwargs): t1",
"@timefn def profiling_factorial(): value = 10 result = factorial(value) print(\"10!= \" + str(result))",
"return 1 else: print(\"Caluating \" + str(num) + \"!\") return factorial(num -1) *",
"wraps def timefn(fn): @wraps(fn) def measure_time(*args, **kwargs): t1 = time.perf_counter() result = fn(*args,",
"<gh_stars>1-10 # calculate 10! import time from functools import wraps def timefn(fn): @wraps(fn)",
"= 10 result = factorial(value) print(\"10!= \" + str(result)) if __name__ == \"__main__\":",
"fn(*args, **kwargs) t2 = time.perf_counter() print(\"@timefn: {} took {} seconds\".format(fn.__name__, t2 - t1))",
"measure_time def factorial(num): if num == 1: return 1 else: print(\"Caluating \" +",
"def factorial(num): if num == 1: return 1 else: print(\"Caluating \" + str(num)",
"took {} seconds\".format(fn.__name__, t2 - t1)) return result return measure_time def factorial(num): if",
"return factorial(num -1) * num @timefn def profiling_factorial(): value = 10 result =",
"t2 = time.perf_counter() print(\"@timefn: {} took {} seconds\".format(fn.__name__, t2 - t1)) return result",
"seconds\".format(fn.__name__, t2 - t1)) return result return measure_time def factorial(num): if num ==",
"if num == 1: return 1 else: print(\"Caluating \" + str(num) + \"!\")",
"import wraps def timefn(fn): @wraps(fn) def measure_time(*args, **kwargs): t1 = time.perf_counter() result =",
"10 result = factorial(value) print(\"10!= \" + str(result)) if __name__ == \"__main__\": profiling_factorial()",
"timefn(fn): @wraps(fn) def measure_time(*args, **kwargs): t1 = time.perf_counter() result = fn(*args, **kwargs) t2",
"calculate 10! import time from functools import wraps def timefn(fn): @wraps(fn) def measure_time(*args,",
"num == 1: return 1 else: print(\"Caluating \" + str(num) + \"!\") return",
"t1 = time.perf_counter() result = fn(*args, **kwargs) t2 = time.perf_counter() print(\"@timefn: {} took",
"factorial(num): if num == 1: return 1 else: print(\"Caluating \" + str(num) +",
"print(\"@timefn: {} took {} seconds\".format(fn.__name__, t2 - t1)) return result return measure_time def",
"factorial(num -1) * num @timefn def profiling_factorial(): value = 10 result = factorial(value)",
"result = fn(*args, **kwargs) t2 = time.perf_counter() print(\"@timefn: {} took {} seconds\".format(fn.__name__, t2",
"def measure_time(*args, **kwargs): t1 = time.perf_counter() result = fn(*args, **kwargs) t2 = time.perf_counter()",
"**kwargs) t2 = time.perf_counter() print(\"@timefn: {} took {} seconds\".format(fn.__name__, t2 - t1)) return",
"return measure_time def factorial(num): if num == 1: return 1 else: print(\"Caluating \"",
"print(\"Caluating \" + str(num) + \"!\") return factorial(num -1) * num @timefn def",
"\"!\") return factorial(num -1) * num @timefn def profiling_factorial(): value = 10 result",
"functools import wraps def timefn(fn): @wraps(fn) def measure_time(*args, **kwargs): t1 = time.perf_counter() result",
"time from functools import wraps def timefn(fn): @wraps(fn) def measure_time(*args, **kwargs): t1 =",
"\" + str(num) + \"!\") return factorial(num -1) * num @timefn def profiling_factorial():",
"else: print(\"Caluating \" + str(num) + \"!\") return factorial(num -1) * num @timefn",
"measure_time(*args, **kwargs): t1 = time.perf_counter() result = fn(*args, **kwargs) t2 = time.perf_counter() print(\"@timefn:",
"{} seconds\".format(fn.__name__, t2 - t1)) return result return measure_time def factorial(num): if num",
"num @timefn def profiling_factorial(): value = 10 result = factorial(value) print(\"10!= \" +",
"10! import time from functools import wraps def timefn(fn): @wraps(fn) def measure_time(*args, **kwargs):",
"= fn(*args, **kwargs) t2 = time.perf_counter() print(\"@timefn: {} took {} seconds\".format(fn.__name__, t2 -",
"1 else: print(\"Caluating \" + str(num) + \"!\") return factorial(num -1) * num",
"@wraps(fn) def measure_time(*args, **kwargs): t1 = time.perf_counter() result = fn(*args, **kwargs) t2 =",
"from functools import wraps def timefn(fn): @wraps(fn) def measure_time(*args, **kwargs): t1 = time.perf_counter()",
"**kwargs): t1 = time.perf_counter() result = fn(*args, **kwargs) t2 = time.perf_counter() print(\"@timefn: {}",
"{} took {} seconds\".format(fn.__name__, t2 - t1)) return result return measure_time def factorial(num):",
"return result return measure_time def factorial(num): if num == 1: return 1 else:",
"== 1: return 1 else: print(\"Caluating \" + str(num) + \"!\") return factorial(num",
"t1)) return result return measure_time def factorial(num): if num == 1: return 1",
"1: return 1 else: print(\"Caluating \" + str(num) + \"!\") return factorial(num -1)",
"* num @timefn def profiling_factorial(): value = 10 result = factorial(value) print(\"10!= \"",
"# calculate 10! import time from functools import wraps def timefn(fn): @wraps(fn) def",
"time.perf_counter() result = fn(*args, **kwargs) t2 = time.perf_counter() print(\"@timefn: {} took {} seconds\".format(fn.__name__,",
"+ \"!\") return factorial(num -1) * num @timefn def profiling_factorial(): value = 10",
"value = 10 result = factorial(value) print(\"10!= \" + str(result)) if __name__ ==",
"t2 - t1)) return result return measure_time def factorial(num): if num == 1:",
"- t1)) return result return measure_time def factorial(num): if num == 1: return",
"def profiling_factorial(): value = 10 result = factorial(value) print(\"10!= \" + str(result)) if",
"= time.perf_counter() result = fn(*args, **kwargs) t2 = time.perf_counter() print(\"@timefn: {} took {}",
"def timefn(fn): @wraps(fn) def measure_time(*args, **kwargs): t1 = time.perf_counter() result = fn(*args, **kwargs)",
"result return measure_time def factorial(num): if num == 1: return 1 else: print(\"Caluating",
"str(num) + \"!\") return factorial(num -1) * num @timefn def profiling_factorial(): value =",
"-1) * num @timefn def profiling_factorial(): value = 10 result = factorial(value) print(\"10!=",
"= time.perf_counter() print(\"@timefn: {} took {} seconds\".format(fn.__name__, t2 - t1)) return result return",
"profiling_factorial(): value = 10 result = factorial(value) print(\"10!= \" + str(result)) if __name__",
"+ str(num) + \"!\") return factorial(num -1) * num @timefn def profiling_factorial(): value"
] |
[
"\"\" langs = \"gencpp;geneus;genlisp;gennodejs;genpy\" dep_include_paths_str = \"simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg\" PYTHON_EXECUTABLE = \"/usr/bin/python3\" package_has_static_sources = ''",
"= \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg/Distance.msg\" services_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/srv/CircularMotion.srv\" pkg_name = \"simple_applications\" dependencies_str = \"\" langs =",
"# generated from genmsg/cmake/pkg-genmsg.context.in messages_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg/Distance.msg\" services_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/srv/CircularMotion.srv\" pkg_name = \"simple_applications\"",
"langs = \"gencpp;geneus;genlisp;gennodejs;genpy\" dep_include_paths_str = \"simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg\" PYTHON_EXECUTABLE = \"/usr/bin/python3\" package_has_static_sources = '' ==",
"dependencies_str = \"\" langs = \"gencpp;geneus;genlisp;gennodejs;genpy\" dep_include_paths_str = \"simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg\" PYTHON_EXECUTABLE = \"/usr/bin/python3\" package_has_static_sources",
"genmsg/cmake/pkg-genmsg.context.in messages_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg/Distance.msg\" services_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/srv/CircularMotion.srv\" pkg_name = \"simple_applications\" dependencies_str = \"\"",
"dep_include_paths_str = \"simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg\" PYTHON_EXECUTABLE = \"/usr/bin/python3\" package_has_static_sources = '' == 'TRUE' genmsg_check_deps_script =",
"pkg_name = \"simple_applications\" dependencies_str = \"\" langs = \"gencpp;geneus;genlisp;gennodejs;genpy\" dep_include_paths_str = \"simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg\" PYTHON_EXECUTABLE",
"= \"gencpp;geneus;genlisp;gennodejs;genpy\" dep_include_paths_str = \"simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg\" PYTHON_EXECUTABLE = \"/usr/bin/python3\" package_has_static_sources = '' == 'TRUE'",
"\"gencpp;geneus;genlisp;gennodejs;genpy\" dep_include_paths_str = \"simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg\" PYTHON_EXECUTABLE = \"/usr/bin/python3\" package_has_static_sources = '' == 'TRUE' genmsg_check_deps_script",
"= \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/srv/CircularMotion.srv\" pkg_name = \"simple_applications\" dependencies_str = \"\" langs = \"gencpp;geneus;genlisp;gennodejs;genpy\" dep_include_paths_str =",
"= \"simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg\" PYTHON_EXECUTABLE = \"/usr/bin/python3\" package_has_static_sources = '' == 'TRUE' genmsg_check_deps_script = \"/opt/ros/noetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py\"",
"services_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/srv/CircularMotion.srv\" pkg_name = \"simple_applications\" dependencies_str = \"\" langs = \"gencpp;geneus;genlisp;gennodejs;genpy\" dep_include_paths_str",
"= \"\" langs = \"gencpp;geneus;genlisp;gennodejs;genpy\" dep_include_paths_str = \"simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg\" PYTHON_EXECUTABLE = \"/usr/bin/python3\" package_has_static_sources =",
"= \"simple_applications\" dependencies_str = \"\" langs = \"gencpp;geneus;genlisp;gennodejs;genpy\" dep_include_paths_str = \"simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg\" PYTHON_EXECUTABLE =",
"generated from genmsg/cmake/pkg-genmsg.context.in messages_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg/Distance.msg\" services_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/srv/CircularMotion.srv\" pkg_name = \"simple_applications\" dependencies_str",
"messages_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg/Distance.msg\" services_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/srv/CircularMotion.srv\" pkg_name = \"simple_applications\" dependencies_str = \"\" langs",
"\"simple_applications\" dependencies_str = \"\" langs = \"gencpp;geneus;genlisp;gennodejs;genpy\" dep_include_paths_str = \"simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg\" PYTHON_EXECUTABLE = \"/usr/bin/python3\"",
"from genmsg/cmake/pkg-genmsg.context.in messages_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg/Distance.msg\" services_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/srv/CircularMotion.srv\" pkg_name = \"simple_applications\" dependencies_str =",
"\"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg/Distance.msg\" services_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/srv/CircularMotion.srv\" pkg_name = \"simple_applications\" dependencies_str = \"\" langs = \"gencpp;geneus;genlisp;gennodejs;genpy\"",
"<filename>catkin_ws/build/simple_applications/cmake/simple_applications-genmsg-context.py # generated from genmsg/cmake/pkg-genmsg.context.in messages_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg/Distance.msg\" services_str = \"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/srv/CircularMotion.srv\" pkg_name =",
"\"/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/srv/CircularMotion.srv\" pkg_name = \"simple_applications\" dependencies_str = \"\" langs = \"gencpp;geneus;genlisp;gennodejs;genpy\" dep_include_paths_str = \"simple_applications;/home/yildiz/GitRepos/ROS_Samples/catkin_ws/src/simple_applications/msg\""
] |
[
"0: return [] x = list(things) return [i for i in x[max(len(x)-num_items, 0):]]",
"tail(things, num_items): if num_items <= 0: return [] x = list(things) return [i",
"if num_items <= 0: return [] x = list(things) return [i for i",
"num_items <= 0: return [] x = list(things) return [i for i in",
"num_items): if num_items <= 0: return [] x = list(things) return [i for",
"def tail(things, num_items): if num_items <= 0: return [] x = list(things) return",
"<= 0: return [] x = list(things) return [i for i in x[max(len(x)-num_items,"
] |
[
"= DataArray(range(5), name=\"tiny\").to_dataset() assert_identical(ds, tiny) def test_download_from_github_load_without_cache(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache =",
"ds = tutorial.open_dataset(self.testfile).load() tiny = DataArray(range(5), name=\"tiny\").to_dataset() assert_identical(ds, tiny) def test_download_from_github_load_without_cache(self, tmp_path, monkeypatch):",
"monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(self.testfile, cache=False).load() ds_cache = tutorial.open_dataset(self.testfile).load() assert_identical(ds_cache, ds_nocache) def test_download_rasterio_from_github_load_without_cache(",
"= tutorial.open_dataset(self.testfile).load() assert_identical(ds_cache, ds_nocache) def test_download_rasterio_from_github_load_without_cache( self, tmp_path, monkeypatch ): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache",
"<reponame>apkrelling/xarray import os import pytest from xarray import DataArray, tutorial from . import",
"tutorial.open_dataset(self.testfile).load() assert_identical(ds_cache, ds_nocache) def test_download_rasterio_from_github_load_without_cache( self, tmp_path, monkeypatch ): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache =",
"): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(\"RGB.byte\", cache=False).load() ds_cache = tutorial.open_dataset(\"RGB.byte\", cache=True).load() assert_identical(ds_cache, ds_nocache)",
"assert_identical(ds_cache, ds_nocache) def test_download_rasterio_from_github_load_without_cache( self, tmp_path, monkeypatch ): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(\"RGB.byte\",",
"DataArray(range(5), name=\"tiny\").to_dataset() assert_identical(ds, tiny) def test_download_from_github_load_without_cache(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(self.testfile,",
"def setUp(self): self.testfile = \"tiny\" def test_download_from_github(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds =",
"tmp_path, monkeypatch ): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(\"RGB.byte\", cache=False).load() ds_cache = tutorial.open_dataset(\"RGB.byte\", cache=True).load()",
"ds_nocache = tutorial.open_dataset(self.testfile, cache=False).load() ds_cache = tutorial.open_dataset(self.testfile).load() assert_identical(ds_cache, ds_nocache) def test_download_rasterio_from_github_load_without_cache( self, tmp_path,",
"setUp(self): self.testfile = \"tiny\" def test_download_from_github(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds = tutorial.open_dataset(self.testfile).load()",
"ds_cache = tutorial.open_dataset(self.testfile).load() assert_identical(ds_cache, ds_nocache) def test_download_rasterio_from_github_load_without_cache( self, tmp_path, monkeypatch ): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path))",
"test_download_from_github_load_without_cache(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(self.testfile, cache=False).load() ds_cache = tutorial.open_dataset(self.testfile).load() assert_identical(ds_cache,",
"@pytest.fixture(autouse=True) def setUp(self): self.testfile = \"tiny\" def test_download_from_github(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds",
"tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(self.testfile, cache=False).load() ds_cache = tutorial.open_dataset(self.testfile).load() assert_identical(ds_cache, ds_nocache)",
"xarray import DataArray, tutorial from . import assert_identical, network @network class TestLoadDataset: @pytest.fixture(autouse=True)",
"tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds = tutorial.open_dataset(self.testfile).load() tiny = DataArray(range(5), name=\"tiny\").to_dataset() assert_identical(ds, tiny)",
"monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds = tutorial.open_dataset(self.testfile).load() tiny = DataArray(range(5), name=\"tiny\").to_dataset() assert_identical(ds, tiny) def test_download_from_github_load_without_cache(self,",
"import os import pytest from xarray import DataArray, tutorial from . import assert_identical,",
"= tutorial.open_dataset(self.testfile).load() tiny = DataArray(range(5), name=\"tiny\").to_dataset() assert_identical(ds, tiny) def test_download_from_github_load_without_cache(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\",",
"import pytest from xarray import DataArray, tutorial from . import assert_identical, network @network",
"tutorial from . import assert_identical, network @network class TestLoadDataset: @pytest.fixture(autouse=True) def setUp(self): self.testfile",
"@network class TestLoadDataset: @pytest.fixture(autouse=True) def setUp(self): self.testfile = \"tiny\" def test_download_from_github(self, tmp_path, monkeypatch):",
"= \"tiny\" def test_download_from_github(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds = tutorial.open_dataset(self.testfile).load() tiny =",
"tiny) def test_download_from_github_load_without_cache(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(self.testfile, cache=False).load() ds_cache =",
"TestLoadDataset: @pytest.fixture(autouse=True) def setUp(self): self.testfile = \"tiny\" def test_download_from_github(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path))",
"os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(self.testfile, cache=False).load() ds_cache = tutorial.open_dataset(self.testfile).load() assert_identical(ds_cache, ds_nocache) def test_download_rasterio_from_github_load_without_cache( self,",
"test_download_rasterio_from_github_load_without_cache( self, tmp_path, monkeypatch ): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(\"RGB.byte\", cache=False).load() ds_cache =",
"class TestLoadDataset: @pytest.fixture(autouse=True) def setUp(self): self.testfile = \"tiny\" def test_download_from_github(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\",",
"\"tiny\" def test_download_from_github(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds = tutorial.open_dataset(self.testfile).load() tiny = DataArray(range(5),",
"import DataArray, tutorial from . import assert_identical, network @network class TestLoadDataset: @pytest.fixture(autouse=True) def",
"self, tmp_path, monkeypatch ): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(\"RGB.byte\", cache=False).load() ds_cache = tutorial.open_dataset(\"RGB.byte\",",
"os import pytest from xarray import DataArray, tutorial from . import assert_identical, network",
"monkeypatch ): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(\"RGB.byte\", cache=False).load() ds_cache = tutorial.open_dataset(\"RGB.byte\", cache=True).load() assert_identical(ds_cache,",
"import assert_identical, network @network class TestLoadDataset: @pytest.fixture(autouse=True) def setUp(self): self.testfile = \"tiny\" def",
"ds_nocache) def test_download_rasterio_from_github_load_without_cache( self, tmp_path, monkeypatch ): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(\"RGB.byte\", cache=False).load()",
"def test_download_rasterio_from_github_load_without_cache( self, tmp_path, monkeypatch ): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(\"RGB.byte\", cache=False).load() ds_cache",
"def test_download_from_github_load_without_cache(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(self.testfile, cache=False).load() ds_cache = tutorial.open_dataset(self.testfile).load()",
"monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(self.testfile, cache=False).load() ds_cache = tutorial.open_dataset(self.testfile).load() assert_identical(ds_cache, ds_nocache) def",
"from . import assert_identical, network @network class TestLoadDataset: @pytest.fixture(autouse=True) def setUp(self): self.testfile =",
"tiny = DataArray(range(5), name=\"tiny\").to_dataset() assert_identical(ds, tiny) def test_download_from_github_load_without_cache(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache",
"tutorial.open_dataset(self.testfile).load() tiny = DataArray(range(5), name=\"tiny\").to_dataset() assert_identical(ds, tiny) def test_download_from_github_load_without_cache(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path))",
"network @network class TestLoadDataset: @pytest.fixture(autouse=True) def setUp(self): self.testfile = \"tiny\" def test_download_from_github(self, tmp_path,",
"monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds = tutorial.open_dataset(self.testfile).load() tiny = DataArray(range(5), name=\"tiny\").to_dataset() assert_identical(ds, tiny) def",
"os.fspath(tmp_path)) ds = tutorial.open_dataset(self.testfile).load() tiny = DataArray(range(5), name=\"tiny\").to_dataset() assert_identical(ds, tiny) def test_download_from_github_load_without_cache(self, tmp_path,",
". import assert_identical, network @network class TestLoadDataset: @pytest.fixture(autouse=True) def setUp(self): self.testfile = \"tiny\"",
"pytest from xarray import DataArray, tutorial from . import assert_identical, network @network class",
"from xarray import DataArray, tutorial from . import assert_identical, network @network class TestLoadDataset:",
"assert_identical(ds, tiny) def test_download_from_github_load_without_cache(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(self.testfile, cache=False).load() ds_cache",
"assert_identical, network @network class TestLoadDataset: @pytest.fixture(autouse=True) def setUp(self): self.testfile = \"tiny\" def test_download_from_github(self,",
"tutorial.open_dataset(self.testfile, cache=False).load() ds_cache = tutorial.open_dataset(self.testfile).load() assert_identical(ds_cache, ds_nocache) def test_download_rasterio_from_github_load_without_cache( self, tmp_path, monkeypatch ):",
"test_download_from_github(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds = tutorial.open_dataset(self.testfile).load() tiny = DataArray(range(5), name=\"tiny\").to_dataset() assert_identical(ds,",
"= tutorial.open_dataset(self.testfile, cache=False).load() ds_cache = tutorial.open_dataset(self.testfile).load() assert_identical(ds_cache, ds_nocache) def test_download_rasterio_from_github_load_without_cache( self, tmp_path, monkeypatch",
"cache=False).load() ds_cache = tutorial.open_dataset(self.testfile).load() assert_identical(ds_cache, ds_nocache) def test_download_rasterio_from_github_load_without_cache( self, tmp_path, monkeypatch ): monkeypatch.setenv(\"XDG_CACHE_DIR\",",
"self.testfile = \"tiny\" def test_download_from_github(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds = tutorial.open_dataset(self.testfile).load() tiny",
"def test_download_from_github(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds = tutorial.open_dataset(self.testfile).load() tiny = DataArray(range(5), name=\"tiny\").to_dataset()",
"name=\"tiny\").to_dataset() assert_identical(ds, tiny) def test_download_from_github_load_without_cache(self, tmp_path, monkeypatch): monkeypatch.setenv(\"XDG_CACHE_DIR\", os.fspath(tmp_path)) ds_nocache = tutorial.open_dataset(self.testfile, cache=False).load()",
"DataArray, tutorial from . import assert_identical, network @network class TestLoadDataset: @pytest.fixture(autouse=True) def setUp(self):"
] |
[
"import argparse import json from pathlib import Path DS_VERSION = \"2018.04.18\" LOCAL_QANTA_PREFIX =",
"def main(): parser = argparse.ArgumentParser() parser.add_argument('output_dir', type=str) args = parser.parse_args() output_dir = Path(args.output_dir)",
"q['qanta_id'], 'question': q['text'], 'answer': q['page'], 'context': ''}) with open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl', 'w') as",
"path) as f: data = json.load(f) output = [] for q in data['questions']:",
"parents=True) for split, path in [('train', QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]: with open(Path(LOCAL_QANTA_PREFIX)",
"/ path) as f: data = json.load(f) output = [] for q in",
"[('train', QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]: with open(Path(LOCAL_QANTA_PREFIX) / path) as f: data",
"QANTA_TRAIN_DATASET_PATH = f\"qanta.train.{DS_VERSION}.json\" QANTA_DEV_DATASET_PATH = f\"qanta.dev.{DS_VERSION}.json\" QANTA_TEST_DATASET_PATH = f\"qanta.test.{DS_VERSION}.json\" def main(): parser =",
"QANTA_TEST_DATASET_PATH = f\"qanta.test.{DS_VERSION}.json\" def main(): parser = argparse.ArgumentParser() parser.add_argument('output_dir', type=str) args = parser.parse_args()",
"f\"qanta.train.{DS_VERSION}.json\" QANTA_DEV_DATASET_PATH = f\"qanta.dev.{DS_VERSION}.json\" QANTA_TEST_DATASET_PATH = f\"qanta.test.{DS_VERSION}.json\" def main(): parser = argparse.ArgumentParser() parser.add_argument('output_dir',",
"q in data['questions']: output.append({'uid': q['qanta_id'], 'question': q['text'], 'answer': q['page'], 'context': ''}) with open(output_dir",
"open(Path(LOCAL_QANTA_PREFIX) / path) as f: data = json.load(f) output = [] for q",
"'answer': q['page'], 'context': ''}) with open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl', 'w') as f: for r",
"DS_VERSION = \"2018.04.18\" LOCAL_QANTA_PREFIX = \"data/external/datasets/\" QANTA_TRAIN_DATASET_PATH = f\"qanta.train.{DS_VERSION}.json\" QANTA_DEV_DATASET_PATH = f\"qanta.dev.{DS_VERSION}.json\" QANTA_TEST_DATASET_PATH",
"as f: data = json.load(f) output = [] for q in data['questions']: output.append({'uid':",
"argparse import json from pathlib import Path DS_VERSION = \"2018.04.18\" LOCAL_QANTA_PREFIX = \"data/external/datasets/\"",
"output.append({'uid': q['qanta_id'], 'question': q['text'], 'answer': q['page'], 'context': ''}) with open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl', 'w')",
"('dev', QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]: with open(Path(LOCAL_QANTA_PREFIX) / path) as f: data = json.load(f)",
"with open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl', 'w') as f: for r in output: f.write(f'{json.dumps(r)}\\n') if",
"= json.load(f) output = [] for q in data['questions']: output.append({'uid': q['qanta_id'], 'question': q['text'],",
"f\"qanta.test.{DS_VERSION}.json\" def main(): parser = argparse.ArgumentParser() parser.add_argument('output_dir', type=str) args = parser.parse_args() output_dir =",
"LOCAL_QANTA_PREFIX = \"data/external/datasets/\" QANTA_TRAIN_DATASET_PATH = f\"qanta.train.{DS_VERSION}.json\" QANTA_DEV_DATASET_PATH = f\"qanta.dev.{DS_VERSION}.json\" QANTA_TEST_DATASET_PATH = f\"qanta.test.{DS_VERSION}.json\" def",
"in [('train', QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]: with open(Path(LOCAL_QANTA_PREFIX) / path) as f:",
"json from pathlib import Path DS_VERSION = \"2018.04.18\" LOCAL_QANTA_PREFIX = \"data/external/datasets/\" QANTA_TRAIN_DATASET_PATH =",
"= parser.parse_args() output_dir = Path(args.output_dir) output_dir.mkdir(exist_ok=True, parents=True) for split, path in [('train', QANTA_TRAIN_DATASET_PATH),",
"with open(Path(LOCAL_QANTA_PREFIX) / path) as f: data = json.load(f) output = [] for",
"q['page'], 'context': ''}) with open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl', 'w') as f: for r in",
"import Path DS_VERSION = \"2018.04.18\" LOCAL_QANTA_PREFIX = \"data/external/datasets/\" QANTA_TRAIN_DATASET_PATH = f\"qanta.train.{DS_VERSION}.json\" QANTA_DEV_DATASET_PATH =",
"parser = argparse.ArgumentParser() parser.add_argument('output_dir', type=str) args = parser.parse_args() output_dir = Path(args.output_dir) output_dir.mkdir(exist_ok=True, parents=True)",
"import json from pathlib import Path DS_VERSION = \"2018.04.18\" LOCAL_QANTA_PREFIX = \"data/external/datasets/\" QANTA_TRAIN_DATASET_PATH",
"= [] for q in data['questions']: output.append({'uid': q['qanta_id'], 'question': q['text'], 'answer': q['page'], 'context':",
"parser.parse_args() output_dir = Path(args.output_dir) output_dir.mkdir(exist_ok=True, parents=True) for split, path in [('train', QANTA_TRAIN_DATASET_PATH), ('dev',",
"output_dir.mkdir(exist_ok=True, parents=True) for split, path in [('train', QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]: with",
"\"2018.04.18\" LOCAL_QANTA_PREFIX = \"data/external/datasets/\" QANTA_TRAIN_DATASET_PATH = f\"qanta.train.{DS_VERSION}.json\" QANTA_DEV_DATASET_PATH = f\"qanta.dev.{DS_VERSION}.json\" QANTA_TEST_DATASET_PATH = f\"qanta.test.{DS_VERSION}.json\"",
"QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]: with open(Path(LOCAL_QANTA_PREFIX) / path) as f: data =",
"for split, path in [('train', QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]: with open(Path(LOCAL_QANTA_PREFIX) /",
"f'qb-{split}-{DS_VERSION}.jsonl', 'w') as f: for r in output: f.write(f'{json.dumps(r)}\\n') if __name__ == '__main__':",
"''}) with open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl', 'w') as f: for r in output: f.write(f'{json.dumps(r)}\\n')",
"QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]: with open(Path(LOCAL_QANTA_PREFIX) / path) as f: data = json.load(f) output",
"<gh_stars>100-1000 import argparse import json from pathlib import Path DS_VERSION = \"2018.04.18\" LOCAL_QANTA_PREFIX",
"argparse.ArgumentParser() parser.add_argument('output_dir', type=str) args = parser.parse_args() output_dir = Path(args.output_dir) output_dir.mkdir(exist_ok=True, parents=True) for split,",
"/ f'qb-{split}-{DS_VERSION}.jsonl', 'w') as f: for r in output: f.write(f'{json.dumps(r)}\\n') if __name__ ==",
"pathlib import Path DS_VERSION = \"2018.04.18\" LOCAL_QANTA_PREFIX = \"data/external/datasets/\" QANTA_TRAIN_DATASET_PATH = f\"qanta.train.{DS_VERSION}.json\" QANTA_DEV_DATASET_PATH",
"Path DS_VERSION = \"2018.04.18\" LOCAL_QANTA_PREFIX = \"data/external/datasets/\" QANTA_TRAIN_DATASET_PATH = f\"qanta.train.{DS_VERSION}.json\" QANTA_DEV_DATASET_PATH = f\"qanta.dev.{DS_VERSION}.json\"",
"type=str) args = parser.parse_args() output_dir = Path(args.output_dir) output_dir.mkdir(exist_ok=True, parents=True) for split, path in",
"output_dir = Path(args.output_dir) output_dir.mkdir(exist_ok=True, parents=True) for split, path in [('train', QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH),",
"'w') as f: for r in output: f.write(f'{json.dumps(r)}\\n') if __name__ == '__main__': main()",
"QANTA_DEV_DATASET_PATH = f\"qanta.dev.{DS_VERSION}.json\" QANTA_TEST_DATASET_PATH = f\"qanta.test.{DS_VERSION}.json\" def main(): parser = argparse.ArgumentParser() parser.add_argument('output_dir', type=str)",
"'question': q['text'], 'answer': q['page'], 'context': ''}) with open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl', 'w') as f:",
"= f\"qanta.dev.{DS_VERSION}.json\" QANTA_TEST_DATASET_PATH = f\"qanta.test.{DS_VERSION}.json\" def main(): parser = argparse.ArgumentParser() parser.add_argument('output_dir', type=str) args",
"Path(args.output_dir) output_dir.mkdir(exist_ok=True, parents=True) for split, path in [('train', QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]:",
"parser.add_argument('output_dir', type=str) args = parser.parse_args() output_dir = Path(args.output_dir) output_dir.mkdir(exist_ok=True, parents=True) for split, path",
"= argparse.ArgumentParser() parser.add_argument('output_dir', type=str) args = parser.parse_args() output_dir = Path(args.output_dir) output_dir.mkdir(exist_ok=True, parents=True) for",
"= Path(args.output_dir) output_dir.mkdir(exist_ok=True, parents=True) for split, path in [('train', QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH), ('test',",
"split, path in [('train', QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]: with open(Path(LOCAL_QANTA_PREFIX) / path)",
"data['questions']: output.append({'uid': q['qanta_id'], 'question': q['text'], 'answer': q['page'], 'context': ''}) with open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl',",
"main(): parser = argparse.ArgumentParser() parser.add_argument('output_dir', type=str) args = parser.parse_args() output_dir = Path(args.output_dir) output_dir.mkdir(exist_ok=True,",
"for q in data['questions']: output.append({'uid': q['qanta_id'], 'question': q['text'], 'answer': q['page'], 'context': ''}) with",
"= f\"qanta.test.{DS_VERSION}.json\" def main(): parser = argparse.ArgumentParser() parser.add_argument('output_dir', type=str) args = parser.parse_args() output_dir",
"f: data = json.load(f) output = [] for q in data['questions']: output.append({'uid': q['qanta_id'],",
"= f\"qanta.train.{DS_VERSION}.json\" QANTA_DEV_DATASET_PATH = f\"qanta.dev.{DS_VERSION}.json\" QANTA_TEST_DATASET_PATH = f\"qanta.test.{DS_VERSION}.json\" def main(): parser = argparse.ArgumentParser()",
"in data['questions']: output.append({'uid': q['qanta_id'], 'question': q['text'], 'answer': q['page'], 'context': ''}) with open(output_dir /",
"open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl', 'w') as f: for r in output: f.write(f'{json.dumps(r)}\\n') if __name__",
"f\"qanta.dev.{DS_VERSION}.json\" QANTA_TEST_DATASET_PATH = f\"qanta.test.{DS_VERSION}.json\" def main(): parser = argparse.ArgumentParser() parser.add_argument('output_dir', type=str) args =",
"QANTA_TEST_DATASET_PATH)]: with open(Path(LOCAL_QANTA_PREFIX) / path) as f: data = json.load(f) output = []",
"'context': ''}) with open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl', 'w') as f: for r in output:",
"path in [('train', QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]: with open(Path(LOCAL_QANTA_PREFIX) / path) as",
"output = [] for q in data['questions']: output.append({'uid': q['qanta_id'], 'question': q['text'], 'answer': q['page'],",
"= \"2018.04.18\" LOCAL_QANTA_PREFIX = \"data/external/datasets/\" QANTA_TRAIN_DATASET_PATH = f\"qanta.train.{DS_VERSION}.json\" QANTA_DEV_DATASET_PATH = f\"qanta.dev.{DS_VERSION}.json\" QANTA_TEST_DATASET_PATH =",
"args = parser.parse_args() output_dir = Path(args.output_dir) output_dir.mkdir(exist_ok=True, parents=True) for split, path in [('train',",
"('test', QANTA_TEST_DATASET_PATH)]: with open(Path(LOCAL_QANTA_PREFIX) / path) as f: data = json.load(f) output =",
"\"data/external/datasets/\" QANTA_TRAIN_DATASET_PATH = f\"qanta.train.{DS_VERSION}.json\" QANTA_DEV_DATASET_PATH = f\"qanta.dev.{DS_VERSION}.json\" QANTA_TEST_DATASET_PATH = f\"qanta.test.{DS_VERSION}.json\" def main(): parser",
"from pathlib import Path DS_VERSION = \"2018.04.18\" LOCAL_QANTA_PREFIX = \"data/external/datasets/\" QANTA_TRAIN_DATASET_PATH = f\"qanta.train.{DS_VERSION}.json\"",
"[] for q in data['questions']: output.append({'uid': q['qanta_id'], 'question': q['text'], 'answer': q['page'], 'context': ''})",
"q['text'], 'answer': q['page'], 'context': ''}) with open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl', 'w') as f: for",
"= \"data/external/datasets/\" QANTA_TRAIN_DATASET_PATH = f\"qanta.train.{DS_VERSION}.json\" QANTA_DEV_DATASET_PATH = f\"qanta.dev.{DS_VERSION}.json\" QANTA_TEST_DATASET_PATH = f\"qanta.test.{DS_VERSION}.json\" def main():",
"json.load(f) output = [] for q in data['questions']: output.append({'uid': q['qanta_id'], 'question': q['text'], 'answer':",
"data = json.load(f) output = [] for q in data['questions']: output.append({'uid': q['qanta_id'], 'question':"
] |
[
"# 함수로 plus/minus/0 판별하기 def f(key): if key>0: print(\"plus\") elif key==0: print(\"zero\") else:",
"plus/minus/0 판별하기 def f(key): if key>0: print(\"plus\") elif key==0: print(\"zero\") else: print(\"minus\") f(int(input()))",
"함수로 plus/minus/0 판별하기 def f(key): if key>0: print(\"plus\") elif key==0: print(\"zero\") else: print(\"minus\")"
] |
[
"float(input('Informe a temperatura em graus ºC: ')) fah = ((celsius * 9) /",
"celsius = float(input('Informe a temperatura em graus ºC: ')) fah = ((celsius *",
"')) fah = ((celsius * 9) / 5) + 32 print('A temperatura de",
"temperatura em graus ºC: ')) fah = ((celsius * 9) / 5) +",
"<reponame>isabellathome/College-Activities celsius = float(input('Informe a temperatura em graus ºC: ')) fah = ((celsius",
"= float(input('Informe a temperatura em graus ºC: ')) fah = ((celsius * 9)",
"graus ºC: ')) fah = ((celsius * 9) / 5) + 32 print('A",
"ºC: ')) fah = ((celsius * 9) / 5) + 32 print('A temperatura",
"fah = ((celsius * 9) / 5) + 32 print('A temperatura de {}ºC",
"em graus ºC: ')) fah = ((celsius * 9) / 5) + 32",
"* 9) / 5) + 32 print('A temperatura de {}ºC corresponde a {}ºF'.format(celsius,",
"= ((celsius * 9) / 5) + 32 print('A temperatura de {}ºC corresponde",
"a temperatura em graus ºC: ')) fah = ((celsius * 9) / 5)",
"((celsius * 9) / 5) + 32 print('A temperatura de {}ºC corresponde a",
"9) / 5) + 32 print('A temperatura de {}ºC corresponde a {}ºF'.format(celsius, fah))"
] |
[
"class Animal(Base): #TODO: Ajouter un autoincrément à la fin d'eRelevé __tablename__ = 'T_Animal'",
"autoincrément à la fin d'eRelevé __tablename__ = 'T_Animal' id = Column('PK_id', Integer, primary_key=True)",
") from ecorelevesensor.models import Base class Animal(Base): #TODO: Ajouter un autoincrément à la",
"from ecorelevesensor.models import Base class Animal(Base): #TODO: Ajouter un autoincrément à la fin",
"import ( Column, Index, Integer, Sequence, String, ) from ecorelevesensor.models import Base class",
"<filename>ecorelevesensor/models/animal.py from sqlalchemy import ( Column, Index, Integer, Sequence, String, ) from ecorelevesensor.models",
"#TODO: Ajouter un autoincrément à la fin d'eRelevé __tablename__ = 'T_Animal' id =",
"la fin d'eRelevé __tablename__ = 'T_Animal' id = Column('PK_id', Integer, primary_key=True) chip_code =",
"Column('PK_id', Integer, primary_key=True) chip_code = Column(String(10)) __table_args__ = ( Index('idx_Tanimal_chipcode_pk', chip_code, id), )",
"id = Column('PK_id', Integer, primary_key=True) chip_code = Column(String(10)) __table_args__ = ( Index('idx_Tanimal_chipcode_pk', chip_code,",
"import Base class Animal(Base): #TODO: Ajouter un autoincrément à la fin d'eRelevé __tablename__",
"Sequence, String, ) from ecorelevesensor.models import Base class Animal(Base): #TODO: Ajouter un autoincrément",
"Base class Animal(Base): #TODO: Ajouter un autoincrément à la fin d'eRelevé __tablename__ =",
"d'eRelevé __tablename__ = 'T_Animal' id = Column('PK_id', Integer, primary_key=True) chip_code = Column(String(10)) __table_args__",
"= Column('PK_id', Integer, primary_key=True) chip_code = Column(String(10)) __table_args__ = ( Index('idx_Tanimal_chipcode_pk', chip_code, id),",
"fin d'eRelevé __tablename__ = 'T_Animal' id = Column('PK_id', Integer, primary_key=True) chip_code = Column(String(10))",
"from sqlalchemy import ( Column, Index, Integer, Sequence, String, ) from ecorelevesensor.models import",
"Animal(Base): #TODO: Ajouter un autoincrément à la fin d'eRelevé __tablename__ = 'T_Animal' id",
"un autoincrément à la fin d'eRelevé __tablename__ = 'T_Animal' id = Column('PK_id', Integer,",
"String, ) from ecorelevesensor.models import Base class Animal(Base): #TODO: Ajouter un autoincrément à",
"à la fin d'eRelevé __tablename__ = 'T_Animal' id = Column('PK_id', Integer, primary_key=True) chip_code",
"( Column, Index, Integer, Sequence, String, ) from ecorelevesensor.models import Base class Animal(Base):",
"Index, Integer, Sequence, String, ) from ecorelevesensor.models import Base class Animal(Base): #TODO: Ajouter",
"__tablename__ = 'T_Animal' id = Column('PK_id', Integer, primary_key=True) chip_code = Column(String(10)) __table_args__ =",
"= 'T_Animal' id = Column('PK_id', Integer, primary_key=True) chip_code = Column(String(10)) __table_args__ = (",
"'T_Animal' id = Column('PK_id', Integer, primary_key=True) chip_code = Column(String(10)) __table_args__ = ( Index('idx_Tanimal_chipcode_pk',",
"Ajouter un autoincrément à la fin d'eRelevé __tablename__ = 'T_Animal' id = Column('PK_id',",
"Column, Index, Integer, Sequence, String, ) from ecorelevesensor.models import Base class Animal(Base): #TODO:",
"Integer, Sequence, String, ) from ecorelevesensor.models import Base class Animal(Base): #TODO: Ajouter un",
"ecorelevesensor.models import Base class Animal(Base): #TODO: Ajouter un autoincrément à la fin d'eRelevé",
"sqlalchemy import ( Column, Index, Integer, Sequence, String, ) from ecorelevesensor.models import Base"
] |
[
"argument '%s'\" \" to method get_profile_by_email_address\" % key ) params[key] = val del",
"returns the request thread. \"\"\" all_params = ['profile_id', 'organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params =",
"the scope of API calls. :param int offset: The offset from the first",
":param str account_id: The account-ID of the profile. (required) :param list[str] organizations: A",
"required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is",
"return. :param str order_by: Specify a field used to order the result set.",
"parameter. By default 10 values are returned. Records are returned in natural order",
"'organizations', 'offset', 'records', 'order_by', 'order', 'include_retired'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key,",
"either ASC or DESC. :param bool include_retired: Whether retired profiles should be returned.",
"api.get_profile(profile_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param",
"or (params['profile_id'] is None): raise ValueError(\"Missing the required parameter `profile_id` when calling `get_profile`\")",
":param bool include_retired: Whether retired profiles should be returned. :return: ProfilePagedMetadata If the",
"License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required",
">>> thread = api.update_profile_with_http_info(request, callback=callback_function) :param callback function: The callback function for asynchronous",
"if 'records' in params: query_params['records'] = params['records'] if 'order_by' in params: query_params['order_by'] =",
"calling `get_profile_by_email_address`\") resource_path = '/profiles/email/{email}'.replace('{format}', 'json') path_params = {} if 'email' in params:",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the",
":param int offset: The offset from the first profile to return. :param int",
"method is called asynchronously, returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if",
"body_params = params['request'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['text/xml', 'application/xml', 'application/json'])",
"by the account-ID parameter. By default 10 values are returned. Records are returned",
"not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['application/json']) #",
"\"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_with_http_info(profile_id, **kwargs) else: (data) = self.get_profile_with_http_info(profile_id,",
"post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_account_id(self, account_id, **kwargs): \"\"\" Returns a",
"1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the \"License\");",
"raise ValueError(\"Missing the required parameter `request` when calling `update_profile`\") resource_path = '/profiles'.replace('{format}', 'json')",
"(params['profile_id'] is None): raise ValueError(\"Missing the required parameter `profile_id` when calling `get_profile`\") resource_path",
"if 'organizations' in params: query_params['organizations'] = params['organizations'] header_params = {} form_params = []",
"returns the request thread. \"\"\" all_params = ['account_id', 'organizations', 'offset', 'records', 'order_by', 'order']",
"header_params['Accept'] = self.api_client.\\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type`",
"License for the specific language governing permissions and limitations under the License. \"\"\"",
"A list of organization-IDs used to restrict the scope of API calls. :return:",
"resource_path = '/profiles'.replace('{format}', 'json') path_params = {} query_params = {} header_params = {}",
"(data) = self.get_profile_with_http_info(profile_id, **kwargs) return data def get_profile_with_http_info(self, profile_id, **kwargs): \"\"\" Returns a",
"pprint(response) >>> >>> thread = api.get_profile(profile_id, callback=callback_function) :param callback function: The callback function",
"= ['account_id', 'organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for",
"data def update_profile_with_http_info(self, request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This",
"query_params['organizations'] = params['organizations'] header_params = {} form_params = [] local_var_files = {} body_params",
":param callback function: The callback function for asynchronous request. (optional) :param list[str] organizations:",
"The email address of the profile. (required) :param list[str] organizations: A list of",
"class manually. Ref: https://github.com/swagger-api/swagger-codegen \"\"\" def __init__(self, api_client=None): config = Configuration() if api_client:",
"the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS",
"path_params['account-ID'] = params['account_id'] query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations']",
"None): raise ValueError(\"Missing the required parameter `profile_id` when calling `get_profile`\") resource_path = '/profiles/{profile-ID}'.replace('{format}',",
"The callback function for asynchronous request. (optional) :param UpdateProfileRequest request: The profile object",
"Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the \"License\"); you",
"request. (optional) :param str account_id: The account-ID of the profile. (required) :param list[str]",
"body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_email_address(self, email, **kwargs): \"\"\" Returns",
":param str profile_id: ID of the Profile. (required) :param list[str] organizations: A list",
"get_profile\" % key ) params[key] = val del params['kwargs'] # verify the required",
"self.api_client.\\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params,",
"by applicable law or agreed to in writing, software distributed under the License",
"order: Ihe direction of any ordering, either ASC or DESC. :param bool include_retired:",
"function: The callback function for asynchronous request. (optional) :param UpdateProfileRequest request: The profile",
"body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def update_profile(self, request, **kwargs): \"\"\" Update",
"the method is called asynchronously, returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True",
"the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_all_profiles_with_http_info(**kwargs) else: (data)",
") params[key] = val del params['kwargs'] # verify the required parameter 'request' is",
"get_profile_by_email_address\" % key ) params[key] = val del params['kwargs'] # verify the required",
"# HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type([]) # Authentication setting auth_settings =",
"the account-ID parameter. By default 10 values are returned. Records are returned in",
"response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_email_address_with_http_info(email, callback=callback_function) :param",
"if ('request' not in params) or (params['request'] is None): raise ValueError(\"Missing the required",
"= self.api_client.\\ select_header_accept(['text/xml', 'application/xml', 'application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header",
"# HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept']",
"\"\"\" BillForward REST API OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under",
"in params: path_params['email'] = params['email'] query_params = {} if 'organizations' in params: query_params['organizations']",
"the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_profile_with_http_info(request, **kwargs) else:",
"response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_account_id(account_id, callback=callback_function) :param",
"all_params = ['email', 'organizations', 'offset', 'records', 'order_by', 'order', 'include_retired'] all_params.append('callback') all_params.append('_return_http_data_only') params =",
"This method makes a synchronous HTTP request by default. To make an asynchronous",
"if key not in all_params: raise TypeError( \"Got an unexpected keyword argument '%s'\"",
"get_profile_by_account_id\" % key ) params[key] = val del params['kwargs'] # verify the required",
"\"Got an unexpected keyword argument '%s'\" \" to method update_profile\" % key )",
"del params['kwargs'] # verify the required parameter 'email' is set if ('email' not",
"the method is called asynchronously, returns the request thread. \"\"\" all_params = ['request']",
"all_params: raise TypeError( \"Got an unexpected keyword argument '%s'\" \" to method update_profile\"",
"thread = api.get_profile_by_email_address_with_http_info(email, callback=callback_function) :param callback function: The callback function for asynchronous request.",
"thread. \"\"\" all_params = ['email', 'organizations', 'offset', 'records', 'order_by', 'order', 'include_retired'] all_params.append('callback') all_params.append('_return_http_data_only')",
"existing profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"} This method makes a synchronous HTTP request by default. To make",
"version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the",
"if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['application/json'])",
"\"\"\" Returns a single profile, specified by the ID parameter. {\\\"nickname\\\":\\\"Retrieve an existing",
"the required parameter 'account_id' is set if ('account_id' not in params) or (params['account_id']",
"ordering, either ASC or DESC. :return: ProfilePagedMetadata If the method is called asynchronously,",
"The profile object to be updated. (required) :return: ProfilePagedMetadata If the method is",
"an unexpected keyword argument '%s'\" \" to method get_profile_by_email_address\" % key ) params[key]",
"key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( \"Got an",
"OR CONDITIONS OF ANY KIND, either express or implied. See the License for",
"(params['account_id'] is None): raise ValueError(\"Missing the required parameter `account_id` when calling `get_profile_by_account_id`\") resource_path",
"ValueError(\"Missing the required parameter `account_id` when calling `get_profile_by_account_id`\") resource_path = '/profiles/account/{account-ID}'.replace('{format}', 'json') path_params",
"= params['order_by'] if 'order' in params: query_params['order'] = params['order'] if 'include_retired' in params:",
"'/profiles/account/{account-ID}'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['account-ID'] = params['account_id'] query_params",
"receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_account_id_with_http_info(account_id,",
"(data) = self.get_profile_by_email_address_with_http_info(email, **kwargs) return data def get_profile_by_email_address_with_http_info(self, email, **kwargs): \"\"\" Returns a",
"may not use this file except in compliance with the License. You may",
"under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR",
"order {\\\"nickname\\\":\\\"Retrieve by account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"} This method makes a synchronous HTTP request by default.",
"bool include_retired: Whether retired profiles should be returned. :return: ProfilePagedMetadata If the method",
"= True if kwargs.get('callback'): return self.update_profile_with_http_info(request, **kwargs) else: (data) = self.update_profile_with_http_info(request, **kwargs) return",
"raise TypeError( \"Got an unexpected keyword argument '%s'\" \" to method get_profile_by_account_id\" %",
"return self.get_all_profiles_with_http_info(**kwargs) else: (data) = self.get_all_profiles_with_http_info(**kwargs) return data def get_all_profiles_with_http_info(self, **kwargs): \"\"\" Returns",
"`update_profile`\") resource_path = '/profiles'.replace('{format}', 'json') path_params = {} query_params = {} header_params =",
"thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_profile_with_http_info(request, **kwargs) else: (data) =",
"asynchronous request. (optional) :param list[str] organizations: A list of organizations used to restrict",
"if ('profile_id' not in params) or (params['profile_id'] is None): raise ValueError(\"Missing the required",
"the method is called asynchronously, returns the request thread. \"\"\" all_params = ['profile_id',",
"# Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params,",
"if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['text/plain'])",
"{\\\"nickname\\\":\\\"Retrieve an existing profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"} This method makes a synchronous HTTP request by default.",
"= '/profiles/email/{email}'.replace('{format}', 'json') path_params = {} if 'email' in params: path_params['email'] = params['email']",
"query_params['order_by'] = params['order_by'] if 'order' in params: query_params['order'] = params['order'] header_params = {}",
"for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( \"Got",
"profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method makes a synchronous HTTP request by default.",
"data def get_profile_by_email_address_with_http_info(self, email, **kwargs): \"\"\" Returns a single profile, specified by the",
"\" to method get_all_profiles\" % key ) params[key] = val del params['kwargs'] resource_path",
"params['account_id'] query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] if 'offset'",
"= params['order_by'] if 'order' in params: query_params['order'] = params['order'] header_params = {} form_params",
"path_params = {} if 'profile_id' in params: path_params['profile-ID'] = params['profile_id'] query_params = {}",
"receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_profile(request,",
"argument '%s'\" \" to method get_profile\" % key ) params[key] = val del",
"request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_account_id_with_http_info(account_id, **kwargs) else: (data)",
"'json') path_params = {} query_params = {} header_params = {} form_params = []",
"used to order the result set. :param str order: Ihe direction of any",
"to method update_profile\" % key ) params[key] = val del params['kwargs'] # verify",
"email: The email address of the profile. (required) :param list[str] organizations: A list",
"pprint(response) >>> >>> thread = api.update_profile_with_http_info(request, callback=callback_function) :param callback function: The callback function",
"a synchronous HTTP request by default. To make an asynchronous HTTP request, please",
"[] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] =",
"ProfilesApi(object): \"\"\" NOTE: This class is auto generated by the swagger code generator",
"params) or (params['profile_id'] is None): raise ValueError(\"Missing the required parameter `profile_id` when calling",
"argument '%s'\" \" to method get_all_profiles\" % key ) params[key] = val del",
"\" to method update_profile\" % key ) params[key] = val del params['kwargs'] #",
"natural order {\\\"nickname\\\":\\\"Get all profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"} This method makes a synchronous HTTP request by",
"ASC or DESC. :param bool include_retired: Whether retired profiles should be returned. :return:",
"\"\"\" all_params = ['request'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in",
"thread = api.get_all_profiles(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional)",
"request thread. \"\"\" all_params = ['organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params",
"the specific language governing permissions and limitations under the License. \"\"\" from __future__",
"callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all_profiles(callback=callback_function) :param callback function: The callback",
"unexpected keyword argument '%s'\" \" to method get_profile_by_account_id\" % key ) params[key] =",
"val del params['kwargs'] resource_path = '/profiles'.replace('{format}', 'json') path_params = {} query_params = {}",
"profiles to return. :param str order_by: Specify a field used to order the",
"the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_account_id_with_http_info(account_id, **kwargs) else:",
"ValueError(\"Missing the required parameter `email` when calling `get_profile_by_email_address`\") resource_path = '/profiles/email/{email}'.replace('{format}', 'json') path_params",
"of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to",
"Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use",
":param callback function: The callback function for asynchronous request. (optional) :param str email:",
"either ASC or DESC. :return: ProfilePagedMetadata If the method is called asynchronously, returns",
"params['include_retired'] header_params = {} form_params = [] local_var_files = {} body_params = None",
"body_params = None if 'request' in params: body_params = params['request'] # HTTP header",
"'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile(self,",
"swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen \"\"\"",
"import re # python 2 and python 3 compatibility library from six import",
"params['order'] if 'include_retired' in params: query_params['include_retired'] = params['include_retired'] header_params = {} form_params =",
"the profile. (required) :param list[str] organizations: A list of organizations used to restrict",
"thread. \"\"\" all_params = ['request'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val",
"key ) params[key] = val del params['kwargs'] # verify the required parameter 'email'",
"Ref: https://github.com/swagger-api/swagger-codegen \"\"\" def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client =",
"response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all_profiles(callback=callback_function) :param callback",
"called asynchronously, returns the request thread. \"\"\" all_params = ['request'] all_params.append('callback') all_params.append('_return_http_data_only') params",
"header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['application/json']) # Authentication setting auth_settings",
"makes a synchronous HTTP request by default. To make an asynchronous HTTP request,",
"\"\"\" Update a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method makes a synchronous HTTP",
"self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def",
"ApiClient class ProfilesApi(object): \"\"\" NOTE: This class is auto generated by the swagger",
"import absolute_import import sys import os import re # python 2 and python",
"required parameter `profile_id` when calling `get_profile`\") resource_path = '/profiles/{profile-ID}'.replace('{format}', 'json') path_params = {}",
"returned. :return: ProfilePagedMetadata If the method is called asynchronously, returns the request thread.",
"os import re # python 2 and python 3 compatibility library from six",
"post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def update_profile(self, request, **kwargs): \"\"\" Update a",
"'order_by' in params: query_params['order_by'] = params['order_by'] if 'order' in params: query_params['order'] = params['order']",
"self.api_client.\\ select_header_content_type(['text/plain']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params,",
"'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def update_profile(self,",
"= self.api_client.\\ select_header_content_type([]) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params,",
"for asynchronous request. (optional) :param str profile_id: ID of the Profile. (required) :param",
"**kwargs): \"\"\" Returns a single profile, specified by the email parameter. {\\\"nickname\\\":\\\"Retrieve by",
"['profile_id', 'organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if",
"not use this file except in compliance with the License. You may obtain",
"used to restrict the scope of API calls. :param int offset: The offset",
"of organizations used to restrict the scope of API calls. :param int offset:",
"auth_settings = [] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata',",
"= '/profiles/{profile-ID}'.replace('{format}', 'json') path_params = {} if 'profile_id' in params: path_params['profile-ID'] = params['profile_id']",
"update_profile\" % key ) params[key] = val del params['kwargs'] # verify the required",
"to method get_all_profiles\" % key ) params[key] = val del params['kwargs'] resource_path =",
"six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class ProfilesApi(object):",
"argument '%s'\" \" to method update_profile\" % key ) params[key] = val del",
"parameter 'email' is set if ('email' not in params) or (params['email'] is None):",
"profile_id, **kwargs): \"\"\" Returns a single profile, specified by the ID parameter. {\\\"nickname\\\":\\\"Retrieve",
">>> thread = api.get_profile_by_email_address_with_http_info(email, callback=callback_function) :param callback function: The callback function for asynchronous",
"path_params = {} query_params = {} header_params = {} form_params = [] local_var_files",
"asynchronously, returns the request thread. \"\"\" all_params = ['organizations', 'offset', 'records', 'order_by', 'order']",
"api.get_profile_by_email_address(email, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param",
"A list of organizations used to restrict the scope of API calls. :param",
"BillForward REST API OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the",
"2.0 (the \"License\"); you may not use this file except in compliance with",
"parameter. {\\\"nickname\\\":\\\"Retrieve an existing profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"} This method makes a synchronous HTTP request by",
"raise ValueError(\"Missing the required parameter `email` when calling `get_profile_by_email_address`\") resource_path = '/profiles/email/{email}'.replace('{format}', 'json')",
"copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed",
"TypeError( \"Got an unexpected keyword argument '%s'\" \" to method get_profile_by_account_id\" % key",
"unexpected keyword argument '%s'\" \" to method get_profile_by_email_address\" % key ) params[key] =",
"def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_email_address(email, callback=callback_function) :param callback function:",
"receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all_profiles(callback=callback_function)",
"'/profiles/{profile-ID}'.replace('{format}', 'json') path_params = {} if 'profile_id' in params: path_params['profile-ID'] = params['profile_id'] query_params",
"if 'organizations' in params: query_params['organizations'] = params['organizations'] if 'offset' in params: query_params['offset'] =",
"= api.get_profile(profile_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional)",
"function: The callback function for asynchronous request. (optional) :param list[str] organizations: A list",
"returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_profile_with_http_info(request, **kwargs)",
"del params['kwargs'] # verify the required parameter 'request' is set if ('request' not",
"the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_email_address(email, callback=callback_function)",
"= {} body_params = None if 'request' in params: body_params = params['request'] #",
"callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_email_address_with_http_info(email, callback=callback_function) :param callback function: The",
">>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_account_id(account_id, callback=callback_function) :param callback",
"**kwargs) else: (data) = self.get_profile_with_http_info(profile_id, **kwargs) return data def get_profile_with_http_info(self, profile_id, **kwargs): \"\"\"",
"account_id: The account-ID of the profile. (required) :param list[str] organizations: A list of",
"API calls. :param int offset: The offset from the first profile to return.",
">>> >>> thread = api.update_profile(request, callback=callback_function) :param callback function: The callback function for",
"return data def get_profile_by_email_address_with_http_info(self, email, **kwargs): \"\"\" Returns a single profile, specified by",
"val del params['kwargs'] # verify the required parameter 'request' is set if ('request'",
"callback_function(response): >>> pprint(response) >>> >>> thread = api.update_profile_with_http_info(request, callback=callback_function) :param callback function: The",
"from the first profile to return. :param int records: The maximum number of",
"callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function) :param callback function: The",
"DESC. :return: ProfilePagedMetadata If the method is called asynchronously, returns the request thread.",
"= params['email'] query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] if",
"not in all_params: raise TypeError( \"Got an unexpected keyword argument '%s'\" \" to",
"def get_profile_by_account_id_with_http_info(self, account_id, **kwargs): \"\"\" Returns a collection of profiles, specified by the",
">>> pprint(response) >>> >>> thread = api.update_profile(request, callback=callback_function) :param callback function: The callback",
">>> thread = api.get_all_profiles_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request.",
"'profile_id' in params: path_params['profile-ID'] = params['profile_id'] query_params = {} if 'organizations' in params:",
"return data def get_all_profiles_with_http_info(self, **kwargs): \"\"\" Returns a collection of all profiles. By",
"asynchronous request. (optional) :param str profile_id: ID of the Profile. (required) :param list[str]",
"['organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val",
"{} body_params = None if 'request' in params: body_params = params['request'] # HTTP",
"select_header_content_type([]) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params,",
"10 values are returned. Records are returned in natural order {\\\"nickname\\\":\\\"Retrieve by account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"}",
"self.api_client.\\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] =",
"= params['account_id'] query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] if",
">>> pprint(response) >>> >>> thread = api.get_profile_with_http_info(profile_id, callback=callback_function) :param callback function: The callback",
"body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_account_id(self, account_id, **kwargs): \"\"\" Returns",
"returns the request thread. \"\"\" all_params = ['organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback')",
"'/profiles'.replace('{format}', 'json') path_params = {} query_params = {} header_params = {} form_params =",
"set if ('request' not in params) or (params['request'] is None): raise ValueError(\"Missing the",
"restrict the scope of API calls. :return: ProfilePagedMetadata If the method is called",
"the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile(profile_id, callback=callback_function)",
"receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_email_address(email,",
"to method get_profile\" % key ) params[key] = val del params['kwargs'] # verify",
"method is called asynchronously, returns the request thread. \"\"\" all_params = ['account_id', 'organizations',",
"= {} form_params = [] local_var_files = {} body_params = None # HTTP",
"= self.get_all_profiles_with_http_info(**kwargs) return data def get_all_profiles_with_http_info(self, **kwargs): \"\"\" Returns a collection of all",
"select_header_accept(['text/xml', 'application/xml', 'application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type']",
"key ) params[key] = val del params['kwargs'] resource_path = '/profiles'.replace('{format}', 'json') path_params =",
"License. \"\"\" from __future__ import absolute_import import sys import os import re #",
"params: query_params['organizations'] = params['organizations'] header_params = {} form_params = [] local_var_files = {}",
"response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all_profiles_with_http_info(callback=callback_function) :param callback",
"= True if kwargs.get('callback'): return self.get_profile_with_http_info(profile_id, **kwargs) else: (data) = self.get_profile_with_http_info(profile_id, **kwargs) return",
"HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type([]) # Authentication setting auth_settings = []",
"permissions and limitations under the License. \"\"\" from __future__ import absolute_import import sys",
"is None): raise ValueError(\"Missing the required parameter `account_id` when calling `get_profile_by_account_id`\") resource_path =",
"= params['organizations'] if 'offset' in params: query_params['offset'] = params['offset'] if 'records' in params:",
"query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def update_profile(self, request, **kwargs):",
"header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['text/plain']) # Authentication",
"`callback` function to be invoked when receiving the response. >>> def callback_function(response): >>>",
"**kwargs): \"\"\" Returns a collection of all profiles. By default 10 values are",
"an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_with_http_info(profile_id, callback=callback_function) :param callback function:",
"are returned. Records are returned in natural order {\\\"nickname\\\":\\\"Retrieve by account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"} This method",
"'order', 'include_retired'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if",
"obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law",
"request thread. \"\"\" all_params = ['account_id', 'organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only')",
"function for asynchronous request. (optional) :param str profile_id: ID of the Profile. (required)",
"return data def get_profile_with_http_info(self, profile_id, **kwargs): \"\"\" Returns a single profile, specified by",
"..configuration import Configuration from ..api_client import ApiClient class ProfilesApi(object): \"\"\" NOTE: This class",
"the required parameter `account_id` when calling `get_profile_by_account_id`\") resource_path = '/profiles/account/{account-ID}'.replace('{format}', 'json') path_params =",
"def get_all_profiles(self, **kwargs): \"\"\" Returns a collection of all profiles. By default 10",
"required parameter `email` when calling `get_profile_by_email_address`\") resource_path = '/profiles/email/{email}'.replace('{format}', 'json') path_params = {}",
"request thread. \"\"\" all_params = ['request'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key,",
"= [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept']",
"header_params = {} form_params = [] local_var_files = {} body_params = None if",
"License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing,",
"files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_email_address(self, email, **kwargs): \"\"\" Returns a single",
"= None # HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['application/json']) if not header_params['Accept']:",
"\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"header_params = {} form_params = [] local_var_files = {} body_params = None #",
"callback function: The callback function for asynchronous request. (optional) :param UpdateProfileRequest request: The",
"The callback function for asynchronous request. (optional) :param str email: The email address",
"restrict the scope of API calls. :param int offset: The offset from the",
"required parameter `account_id` when calling `get_profile_by_account_id`\") resource_path = '/profiles/account/{account-ID}'.replace('{format}', 'json') path_params = {}",
"compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import",
"request. (optional) :param str profile_id: ID of the Profile. (required) :param list[str] organizations:",
"kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_all_profiles_with_http_info(**kwargs) else: (data) = self.get_all_profiles_with_http_info(**kwargs) return data",
"['request'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key",
"'%s'\" \" to method get_profile_by_email_address\" % key ) params[key] = val del params['kwargs']",
"{} if 'email' in params: path_params['email'] = params['email'] query_params = {} if 'organizations'",
"an unexpected keyword argument '%s'\" \" to method update_profile\" % key ) params[key]",
"required parameter 'email' is set if ('email' not in params) or (params['email'] is",
"(data) = self.get_all_profiles_with_http_info(**kwargs) return data def get_all_profiles_with_http_info(self, **kwargs): \"\"\" Returns a collection of",
"params['records'] if 'order_by' in params: query_params['order_by'] = params['order_by'] if 'order' in params: query_params['order']",
"Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient()",
"= ApiClient() self.api_client = config.api_client def get_all_profiles(self, **kwargs): \"\"\" Returns a collection of",
"params[key] = val del params['kwargs'] resource_path = '/profiles'.replace('{format}', 'json') path_params = {} query_params",
"in params: query_params['order_by'] = params['order_by'] if 'order' in params: query_params['order'] = params['order'] if",
">>> thread = api.get_profile(profile_id, callback=callback_function) :param callback function: The callback function for asynchronous",
"`Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path,",
"The callback function for asynchronous request. (optional) :param str account_id: The account-ID of",
">>> thread = api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function) :param callback function: The callback function for asynchronous",
") params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is",
"params['kwargs'] # verify the required parameter 'profile_id' is set if ('profile_id' not in",
"self.get_all_profiles_with_http_info(**kwargs) return data def get_all_profiles_with_http_info(self, **kwargs): \"\"\" Returns a collection of all profiles.",
"if kwargs.get('callback'): return self.update_profile_with_http_info(request, **kwargs) else: (data) = self.update_profile_with_http_info(request, **kwargs) return data def",
"HTTP request, please define a `callback` function to be invoked when receiving the",
"params: query_params['organizations'] = params['organizations'] if 'offset' in params: query_params['offset'] = params['offset'] if 'records'",
"def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile(profile_id, callback=callback_function) :param callback function:",
"response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_with_http_info(profile_id, callback=callback_function) :param",
"is set if ('request' not in params) or (params['request'] is None): raise ValueError(\"Missing",
"'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']):",
"callback function: The callback function for asynchronous request. (optional) :param list[str] organizations: A",
"'application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\",
"to be updated. (required) :return: ProfilePagedMetadata If the method is called asynchronously, returns",
"the request thread. \"\"\" all_params = ['profile_id', 'organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals()",
"the scope of API calls. :return: ProfilePagedMetadata If the method is called asynchronously,",
"required parameter 'request' is set if ('request' not in params) or (params['request'] is",
"path_params = {} query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations']",
"api.get_profile_with_http_info(profile_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param",
"kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_email_address_with_http_info(email, **kwargs) else: (data) = self.get_profile_by_email_address_with_http_info(email, **kwargs)",
"the request thread. \"\"\" all_params = ['organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only')",
"(params['email'] is None): raise ValueError(\"Missing the required parameter `email` when calling `get_profile_by_email_address`\") resource_path",
"Returns a single profile, specified by the ID parameter. {\\\"nickname\\\":\\\"Retrieve an existing profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"}",
"= {} if 'organizations' in params: query_params['organizations'] = params['organizations'] if 'offset' in params:",
"in params) or (params['request'] is None): raise ValueError(\"Missing the required parameter `request` when",
"api.update_profile_with_http_info(request, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param",
"Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params,",
"= self.get_profile_by_email_address_with_http_info(email, **kwargs) return data def get_profile_by_email_address_with_http_info(self, email, **kwargs): \"\"\" Returns a single",
"'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_email_address(self,",
"of all profiles. By default 10 values are returned. Records are returned in",
"Returns a collection of profiles, specified by the account-ID parameter. By default 10",
"from __future__ import absolute_import import sys import os import re # python 2",
"(required) :param list[str] organizations: A list of organization-IDs used to restrict the scope",
"the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in",
"kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_account_id_with_http_info(account_id, **kwargs) else: (data) = self.get_profile_by_account_id_with_http_info(account_id, **kwargs)",
"the class manually. Ref: https://github.com/swagger-api/swagger-codegen \"\"\" def __init__(self, api_client=None): config = Configuration() if",
"Update a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method makes a synchronous HTTP request",
"response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_email_address(email, callback=callback_function) :param",
"returned in natural order {\\\"nickname\\\":\\\"Retrieve by account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"} This method makes a synchronous HTTP",
":return: ProfilePagedMetadata If the method is called asynchronously, returns the request thread. \"\"\"",
"_return_http_data_only=params.get('_return_http_data_only')) def update_profile(self, request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This",
"'organizations' in params: query_params['organizations'] = params['organizations'] header_params = {} form_params = [] local_var_files",
"[] local_var_files = {} body_params = None if 'request' in params: body_params =",
"**kwargs): \"\"\" Returns a collection of profiles, specified by the account-ID parameter. By",
"thread = api.get_profile_by_account_id(account_id, callback=callback_function) :param callback function: The callback function for asynchronous request.",
"API OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License,",
"address of the profile. (required) :param list[str] organizations: A list of organizations used",
"records: The maximum number of profiles to return. :param str order_by: Specify a",
"parameter `request` when calling `update_profile`\") resource_path = '/profiles'.replace('{format}', 'json') path_params = {} query_params",
"request thread. \"\"\" all_params = ['profile_id', 'organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for",
"{} form_params = [] local_var_files = {} body_params = None # HTTP header",
"Unless required by applicable law or agreed to in writing, software distributed under",
"{\\\"nickname\\\":\\\"Retrieve by account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"} This method makes a synchronous HTTP request by default. To",
"('request' not in params) or (params['request'] is None): raise ValueError(\"Missing the required parameter",
"(required) :return: ProfilePagedMetadata If the method is called asynchronously, returns the request thread.",
"response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def update_profile(self, request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update",
"'offset' in params: query_params['offset'] = params['offset'] if 'records' in params: query_params['records'] = params['records']",
"is called asynchronously, returns the request thread. \"\"\" all_params = ['organizations', 'offset', 'records',",
"'records', 'order_by', 'order', 'include_retired'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in",
"ProfilePagedMetadata If the method is called asynchronously, returns the request thread. \"\"\" kwargs['_return_http_data_only']",
"'order' in params: query_params['order'] = params['order'] header_params = {} form_params = [] local_var_files",
"for asynchronous request. (optional) :param str account_id: The account-ID of the profile. (required)",
"10 values are returned. Records are returned in natural order {\\\"nickname\\\":\\\"Get all profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"}",
"query_params['order'] = params['order'] if 'include_retired' in params: query_params['include_retired'] = params['include_retired'] header_params = {}",
"thread = api.get_all_profiles_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional)",
"of API calls. :param int offset: The offset from the first profile to",
"= True if kwargs.get('callback'): return self.get_profile_by_account_id_with_http_info(account_id, **kwargs) else: (data) = self.get_profile_by_account_id_with_http_info(account_id, **kwargs) return",
"if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def get_all_profiles(self, **kwargs): \"\"\"",
"= locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise",
"2 and python 3 compatibility library from six import iteritems from ..configuration import",
"api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client =",
"in params: path_params['account-ID'] = params['account_id'] query_params = {} if 'organizations' in params: query_params['organizations']",
"'include_retired' in params: query_params['include_retired'] = params['include_retired'] header_params = {} form_params = [] local_var_files",
"data def get_profile_with_http_info(self, profile_id, **kwargs): \"\"\" Returns a single profile, specified by the",
"{} form_params = [] local_var_files = {} body_params = None if 'request' in",
"organization-IDs used to restrict the scope of API calls. :return: ProfilePagedMetadata If the",
"params['kwargs'] resource_path = '/profiles'.replace('{format}', 'json') path_params = {} query_params = {} if 'organizations'",
"select_header_content_type(['text/plain']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params,",
"pprint(response) >>> >>> thread = api.update_profile(request, callback=callback_function) :param callback function: The callback function",
"the required parameter `request` when calling `update_profile`\") resource_path = '/profiles'.replace('{format}', 'json') path_params =",
"software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT",
"def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_profile_with_http_info(request, callback=callback_function) :param callback function:",
"return data def update_profile_with_http_info(self, request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"}",
"function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response)",
"an unexpected keyword argument '%s'\" \" to method get_profile_by_account_id\" % key ) params[key]",
"params['offset'] if 'records' in params: query_params['records'] = params['records'] if 'order_by' in params: query_params['order_by']",
"= params['profile_id'] query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] header_params",
"request. (optional) :param UpdateProfileRequest request: The profile object to be updated. (required) :return:",
"from ..api_client import ApiClient class ProfilesApi(object): \"\"\" NOTE: This class is auto generated",
"from ..configuration import Configuration from ..api_client import ApiClient class ProfilesApi(object): \"\"\" NOTE: This",
"default 10 values are returned. Records are returned in natural order {\\\"nickname\\\":\\\"Get all",
"def get_profile_by_account_id(self, account_id, **kwargs): \"\"\" Returns a collection of profiles, specified by the",
"'%s'\" \" to method update_profile\" % key ) params[key] = val del params['kwargs']",
"in writing, software distributed under the License is distributed on an \"AS IS\"",
"# HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['text/xml', 'application/xml', 'application/json']) if not header_params['Accept']:",
"query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_account_id(self, account_id, **kwargs):",
"not in params) or (params['profile_id'] is None): raise ValueError(\"Missing the required parameter `profile_id`",
"keyword argument '%s'\" \" to method update_profile\" % key ) params[key] = val",
"return self.update_profile_with_http_info(request, **kwargs) else: (data) = self.update_profile_with_http_info(request, **kwargs) return data def update_profile_with_http_info(self, request,",
"HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['text/plain']) # Authentication setting auth_settings = []",
"the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_profile(request, callback=callback_function)",
"{\\\"nickname\\\":\\\"Get all profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"} This method makes a synchronous HTTP request by default. To",
"or agreed to in writing, software distributed under the License is distributed on",
"header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['text/xml', 'application/xml', 'application/json']) if not header_params['Accept']: del header_params['Accept']",
"The callback function for asynchronous request. (optional) :param list[str] organizations: A list of",
"= ['email', 'organizations', 'offset', 'records', 'order_by', 'order', 'include_retired'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals()",
"body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile(self, profile_id, **kwargs): \"\"\" Returns",
"local_var_files = {} body_params = None if 'request' in params: body_params = params['request']",
"asynchronously, returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_with_http_info(profile_id,",
"'/profiles'.replace('{format}', 'json') path_params = {} query_params = {} if 'organizations' in params: query_params['organizations']",
"if 'offset' in params: query_params['offset'] = params['offset'] if 'records' in params: query_params['records'] =",
"is called asynchronously, returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'):",
"get_all_profiles(self, **kwargs): \"\"\" Returns a collection of all profiles. By default 10 values",
"callback function: The callback function for asynchronous request. (optional) :param str profile_id: ID",
"params: query_params['order'] = params['order'] header_params = {} form_params = [] local_var_files = {}",
"the method is called asynchronously, returns the request thread. \"\"\" all_params = ['email',",
"int records: The maximum number of profiles to return. :param str order_by: Specify",
">>> >>> thread = api.get_all_profiles_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous",
"`request` when calling `update_profile`\") resource_path = '/profiles'.replace('{format}', 'json') path_params = {} query_params =",
"is None): raise ValueError(\"Missing the required parameter `request` when calling `update_profile`\") resource_path =",
"parameter 'request' is set if ('request' not in params) or (params['request'] is None):",
"all profiles. By default 10 values are returned. Records are returned in natural",
"method get_all_profiles\" % key ) params[key] = val del params['kwargs'] resource_path = '/profiles'.replace('{format}',",
"# HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['text/plain']) # Authentication setting auth_settings =",
"query_params['offset'] = params['offset'] if 'records' in params: query_params['records'] = params['records'] if 'order_by' in",
"kwargs.get('callback'): return self.get_profile_with_http_info(profile_id, **kwargs) else: (data) = self.get_profile_with_http_info(profile_id, **kwargs) return data def get_profile_with_http_info(self,",
"query_params['include_retired'] = params['include_retired'] header_params = {} form_params = [] local_var_files = {} body_params",
"order_by: Specify a field used to order the result set. :param str order:",
"Records are returned in natural order {\\\"nickname\\\":\\\"Retrieve by account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"} This method makes a",
"receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_profile_with_http_info(request,",
"if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client",
"function: The callback function for asynchronous request. (optional) :param str account_id: The account-ID",
"python 2 and python 3 compatibility library from six import iteritems from ..configuration",
"= api.update_profile_with_http_info(request, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional)",
"config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def get_all_profiles(self, **kwargs): \"\"\" Returns a",
"organizations: A list of organization-IDs used to restrict the scope of API calls.",
"get_profile_by_account_id(self, account_id, **kwargs): \"\"\" Returns a collection of profiles, specified by the account-ID",
"get_profile(self, profile_id, **kwargs): \"\"\" Returns a single profile, specified by the ID parameter.",
"and python 3 compatibility library from six import iteritems from ..configuration import Configuration",
"def get_profile_by_email_address(self, email, **kwargs): \"\"\" Returns a single profile, specified by the email",
"this file except in compliance with the License. You may obtain a copy",
"the ID parameter. {\\\"nickname\\\":\\\"Retrieve an existing profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"} This method makes a synchronous HTTP",
"in params) or (params['account_id'] is None): raise ValueError(\"Missing the required parameter `account_id` when",
"kwargs.get('callback'): return self.get_profile_by_email_address_with_http_info(email, **kwargs) else: (data) = self.get_profile_by_email_address_with_http_info(email, **kwargs) return data def get_profile_by_email_address_with_http_info(self,",
"response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile(self, profile_id, **kwargs): \"\"\" Returns a single profile,",
"params['kwargs'] # verify the required parameter 'email' is set if ('email' not in",
"you may not use this file except in compliance with the License. You",
"params: query_params['offset'] = params['offset'] if 'records' in params: query_params['records'] = params['records'] if 'order_by'",
"is called asynchronously, returns the request thread. \"\"\" all_params = ['request'] all_params.append('callback') all_params.append('_return_http_data_only')",
"https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the \"License\"); you may not",
"\"\"\" all_params = ['email', 'organizations', 'offset', 'records', 'order_by', 'order', 'include_retired'] all_params.append('callback') all_params.append('_return_http_data_only') params",
"may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable",
"params['profile_id'] query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] header_params =",
"please define a `callback` function to be invoked when receiving the response. >>>",
"or DESC. :param bool include_retired: Whether retired profiles should be returned. :return: ProfilePagedMetadata",
"email parameter. {\\\"nickname\\\":\\\"Retrieve by e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"} This method makes a synchronous HTTP request by",
"list of organizations used to restrict the scope of API calls. :param int",
"TypeError( \"Got an unexpected keyword argument '%s'\" \" to method get_profile_by_email_address\" % key",
"# HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['application/json']) # Authentication setting auth_settings =",
"body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['application/json']) if not",
":param list[str] organizations: A list of organizations used to restrict the scope of",
"if 'account_id' in params: path_params['account-ID'] = params['account_id'] query_params = {} if 'organizations' in",
"params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params:",
"= params['records'] if 'order_by' in params: query_params['order_by'] = params['order_by'] if 'order' in params:",
"thread = api.update_profile_with_http_info(request, callback=callback_function) :param callback function: The callback function for asynchronous request.",
">>> >>> thread = api.update_profile_with_http_info(request, callback=callback_function) :param callback function: The callback function for",
"asynchronously, returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_all_profiles_with_http_info(**kwargs)",
"under the License. \"\"\" from __future__ import absolute_import import sys import os import",
"(optional) :param list[str] organizations: A list of organizations used to restrict the scope",
"\"\"\" all_params = ['account_id', 'organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params =",
"called asynchronously, returns the request thread. \"\"\" all_params = ['profile_id', 'organizations'] all_params.append('callback') all_params.append('_return_http_data_only')",
"thread = api.get_profile_by_email_address(email, callback=callback_function) :param callback function: The callback function for asynchronous request.",
"the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen",
"form_params = [] local_var_files = {} body_params = None # HTTP header `Accept`",
">>> pprint(response) >>> >>> thread = api.get_profile_by_account_id(account_id, callback=callback_function) :param callback function: The callback",
"request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_email_address_with_http_info(email, **kwargs) else: (data)",
"if ('email' not in params) or (params['email'] is None): raise ValueError(\"Missing the required",
"api.update_profile(request, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param",
"asynchronously, returns the request thread. \"\"\" all_params = ['account_id', 'organizations', 'offset', 'records', 'order_by',",
"\"Got an unexpected keyword argument '%s'\" \" to method get_all_profiles\" % key )",
"header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile(self, profile_id, **kwargs): \"\"\"",
"\"\"\" NOTE: This class is auto generated by the swagger code generator program.",
"auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_email_address(self, email, **kwargs): \"\"\" Returns a single profile, specified",
"raise ValueError(\"Missing the required parameter `profile_id` when calling `get_profile`\") resource_path = '/profiles/{profile-ID}'.replace('{format}', 'json')",
"thread = api.get_profile_with_http_info(profile_id, callback=callback_function) :param callback function: The callback function for asynchronous request.",
">>> pprint(response) >>> >>> thread = api.get_profile(profile_id, callback=callback_function) :param callback function: The callback",
"or (params['email'] is None): raise ValueError(\"Missing the required parameter `email` when calling `get_profile_by_email_address`\")",
"by the swagger code generator program. Do not edit the class manually. Ref:",
"python 3 compatibility library from six import iteritems from ..configuration import Configuration from",
"result set. :param str order: Ihe direction of any ordering, either ASC or",
"required parameter 'profile_id' is set if ('profile_id' not in params) or (params['profile_id'] is",
"query_params['order'] = params['order'] header_params = {} form_params = [] local_var_files = {} body_params",
"the required parameter `email` when calling `get_profile_by_email_address`\") resource_path = '/profiles/email/{email}'.replace('{format}', 'json') path_params =",
"receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_account_id(account_id,",
"generated by the swagger code generator program. Do not edit the class manually.",
"(data) = self.get_profile_by_account_id_with_http_info(account_id, **kwargs) return data def get_profile_by_account_id_with_http_info(self, account_id, **kwargs): \"\"\" Returns a",
"else: (data) = self.get_profile_by_email_address_with_http_info(email, **kwargs) return data def get_profile_by_email_address_with_http_info(self, email, **kwargs): \"\"\" Returns",
"kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_with_http_info(profile_id, **kwargs) else: (data) = self.get_profile_with_http_info(profile_id, **kwargs)",
":param str email: The email address of the profile. (required) :param list[str] organizations:",
"maximum number of profiles to return. :param str order_by: Specify a field used",
">>> pprint(response) >>> >>> thread = api.update_profile_with_http_info(request, callback=callback_function) :param callback function: The callback",
"a single profile, specified by the email parameter. {\\\"nickname\\\":\\\"Retrieve by e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"} This method",
"to method get_profile_by_account_id\" % key ) params[key] = val del params['kwargs'] # verify",
":param callback function: The callback function for asynchronous request. (optional) :param UpdateProfileRequest request:",
"\" to method get_profile_by_account_id\" % key ) params[key] = val del params['kwargs'] #",
"`profile_id` when calling `get_profile`\") resource_path = '/profiles/{profile-ID}'.replace('{format}', 'json') path_params = {} if 'profile_id'",
"header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['text/plain']) # Authentication setting auth_settings",
"auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_account_id(self, account_id, **kwargs): \"\"\" Returns a collection of profiles,",
"Ihe direction of any ordering, either ASC or DESC. :return: ProfilePagedMetadata If the",
"# Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params,",
"if 'include_retired' in params: query_params['include_retired'] = params['include_retired'] header_params = {} form_params = []",
"file except in compliance with the License. You may obtain a copy of",
"account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"} This method makes a synchronous HTTP request by default. To make an",
"asynchronously, returns the request thread. \"\"\" all_params = ['profile_id', 'organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params",
"for asynchronous request. (optional) :param UpdateProfileRequest request: The profile object to be updated.",
"del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['text/plain']) # Authentication setting",
"is set if ('email' not in params) or (params['email'] is None): raise ValueError(\"Missing",
"raise ValueError(\"Missing the required parameter `account_id` when calling `get_profile_by_account_id`\") resource_path = '/profiles/account/{account-ID}'.replace('{format}', 'json')",
"= api.get_all_profiles_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param",
"True if kwargs.get('callback'): return self.get_profile_by_email_address_with_http_info(email, **kwargs) else: (data) = self.get_profile_by_email_address_with_http_info(email, **kwargs) return data",
"function for asynchronous request. (optional) :param list[str] organizations: A list of organizations used",
"thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_account_id_with_http_info(account_id, **kwargs) else: (data) =",
"thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_with_http_info(profile_id, **kwargs) else: (data) =",
"= params['offset'] if 'records' in params: query_params['records'] = params['records'] if 'order_by' in params:",
"iteritems(params['kwargs']): if key not in all_params: raise TypeError( \"Got an unexpected keyword argument",
">>> thread = api.get_profile_by_email_address(email, callback=callback_function) :param callback function: The callback function for asynchronous",
"\" to method get_profile_by_email_address\" % key ) params[key] = val del params['kwargs'] #",
"..api_client import ApiClient class ProfilesApi(object): \"\"\" NOTE: This class is auto generated by",
"path_params['email'] = params['email'] query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations']",
"pprint(response) >>> >>> thread = api.get_all_profiles(callback=callback_function) :param callback function: The callback function for",
"edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen \"\"\" def __init__(self, api_client=None): config = Configuration()",
"by the email parameter. {\\\"nickname\\\":\\\"Retrieve by e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"} This method makes a synchronous HTTP",
"account-ID parameter. By default 10 values are returned. Records are returned in natural",
"parameter `account_id` when calling `get_profile_by_account_id`\") resource_path = '/profiles/account/{account-ID}'.replace('{format}', 'json') path_params = {} if",
"response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_email_address(self, email, **kwargs): \"\"\" Returns a single profile,",
"else: (data) = self.update_profile_with_http_info(request, **kwargs) return data def update_profile_with_http_info(self, request, **kwargs): \"\"\" Update",
"not in params) or (params['email'] is None): raise ValueError(\"Missing the required parameter `email`",
"response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function) :param",
"of any ordering, either ASC or DESC. :return: ProfilePagedMetadata If the method is",
"callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def update_profile(self, request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"}",
"in params: query_params['offset'] = params['offset'] if 'records' in params: query_params['records'] = params['records'] if",
"= Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client =",
"in params: query_params['order'] = params['order'] header_params = {} form_params = [] local_var_files =",
"when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread =",
"callback_function(response): >>> pprint(response) >>> >>> thread = api.update_profile(request, callback=callback_function) :param callback function: The",
"direction of any ordering, either ASC or DESC. :return: ProfilePagedMetadata If the method",
"resource_path = '/profiles/{profile-ID}'.replace('{format}', 'json') path_params = {} if 'profile_id' in params: path_params['profile-ID'] =",
"if kwargs.get('callback'): return self.get_profile_by_account_id_with_http_info(account_id, **kwargs) else: (data) = self.get_profile_by_account_id_with_http_info(account_id, **kwargs) return data def",
"path_params = {} if 'account_id' in params: path_params['account-ID'] = params['account_id'] query_params = {}",
"= True if kwargs.get('callback'): return self.get_profile_by_email_address_with_http_info(email, **kwargs) else: (data) = self.get_profile_by_email_address_with_http_info(email, **kwargs) return",
"header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type([]) # Authentication setting auth_settings",
"law or agreed to in writing, software distributed under the License is distributed",
"set if ('profile_id' not in params) or (params['profile_id'] is None): raise ValueError(\"Missing the",
"\"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_email_address_with_http_info(email, **kwargs) else: (data) = self.get_profile_by_email_address_with_http_info(email,",
"Version 2.0 (the \"License\"); you may not use this file except in compliance",
"\"Got an unexpected keyword argument '%s'\" \" to method get_profile_by_email_address\" % key )",
"callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_with_http_info(profile_id, callback=callback_function) :param callback function: The",
">>> pprint(response) >>> >>> thread = api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function) :param callback function: The callback",
"is called asynchronously, returns the request thread. \"\"\" all_params = ['profile_id', 'organizations'] all_params.append('callback')",
"path_params['profile-ID'] = params['profile_id'] query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations']",
"called asynchronously, returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return",
"profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"} This method makes a synchronous HTTP request by default. To make an",
"a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method makes a synchronous HTTP request by",
"the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_profile_with_http_info(request, callback=callback_function)",
"under the Apache License, Version 2.0 (the \"License\"); you may not use this",
"for the specific language governing permissions and limitations under the License. \"\"\" from",
"list of organization-IDs used to restrict the scope of API calls. :return: ProfilePagedMetadata",
"`Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['text/xml', 'application/xml', 'application/json']) if not header_params['Accept']: del header_params['Accept'] #",
"asynchronous request. (optional) :param UpdateProfileRequest request: The profile object to be updated. (required)",
"del params['kwargs'] resource_path = '/profiles'.replace('{format}', 'json') path_params = {} query_params = {} if",
"= True if kwargs.get('callback'): return self.get_all_profiles_with_http_info(**kwargs) else: (data) = self.get_all_profiles_with_http_info(**kwargs) return data def",
"or implied. See the License for the specific language governing permissions and limitations",
"The account-ID of the profile. (required) :param list[str] organizations: A list of organizations",
"in params: query_params['organizations'] = params['organizations'] header_params = {} form_params = [] local_var_files =",
"pprint(response) >>> >>> thread = api.get_profile_by_email_address_with_http_info(email, callback=callback_function) :param callback function: The callback function",
"the required parameter 'email' is set if ('email' not in params) or (params['email']",
"raise TypeError( \"Got an unexpected keyword argument '%s'\" \" to method update_profile\" %",
"Returns a single profile, specified by the email parameter. {\\\"nickname\\\":\\\"Retrieve by e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"} This",
"collection of all profiles. By default 10 values are returned. Records are returned",
"**kwargs): \"\"\" Returns a single profile, specified by the ID parameter. {\\\"nickname\\\":\\\"Retrieve an",
"CONDITIONS OF ANY KIND, either express or implied. See the License for the",
"field used to order the result set. :param str order: Ihe direction of",
"'organizations' in params: query_params['organizations'] = params['organizations'] if 'offset' in params: query_params['offset'] = params['offset']",
"params) or (params['account_id'] is None): raise ValueError(\"Missing the required parameter `account_id` when calling",
"thread. \"\"\" all_params = ['account_id', 'organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params",
"return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'))",
"be returned. :return: ProfilePagedMetadata If the method is called asynchronously, returns the request",
"except in compliance with the License. You may obtain a copy of the",
"called asynchronously, returns the request thread. \"\"\" all_params = ['account_id', 'organizations', 'offset', 'records',",
">>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_email_address(email, callback=callback_function) :param callback",
"__future__ import absolute_import import sys import os import re # python 2 and",
"ProfilePagedMetadata If the method is called asynchronously, returns the request thread. \"\"\" all_params",
"default 10 values are returned. Records are returned in natural order {\\\"nickname\\\":\\\"Retrieve by",
">>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function) :param callback",
"self.get_profile_by_account_id_with_http_info(account_id, **kwargs) return data def get_profile_by_account_id_with_http_info(self, account_id, **kwargs): \"\"\" Returns a collection of",
"= val del params['kwargs'] # verify the required parameter 'email' is set if",
"not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def get_all_profiles(self, **kwargs): \"\"\" Returns",
"profile, specified by the email parameter. {\\\"nickname\\\":\\\"Retrieve by e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"} This method makes a",
"an unexpected keyword argument '%s'\" \" to method get_profile\" % key ) params[key]",
"'%s'\" \" to method get_profile\" % key ) params[key] = val del params['kwargs']",
">>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all_profiles(callback=callback_function) :param callback function:",
"= self.api_client.\\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type']",
"Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params,",
"= [] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings,",
"method is called asynchronously, returns the request thread. \"\"\" all_params = ['organizations', 'offset',",
"get_all_profiles\" % key ) params[key] = val del params['kwargs'] resource_path = '/profiles'.replace('{format}', 'json')",
"a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method makes a synchronous HTTP request by default. To make",
"required parameter `request` when calling `update_profile`\") resource_path = '/profiles'.replace('{format}', 'json') path_params = {}",
"not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type([]) #",
"{} query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] if 'offset'",
"response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile(profile_id, callback=callback_function) :param",
"params[key] = val del params['kwargs'] # verify the required parameter 'profile_id' is set",
"all profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"} This method makes a synchronous HTTP request by default. To make",
"of the Profile. (required) :param list[str] organizations: A list of organization-IDs used to",
"params[key] = val del params['kwargs'] # verify the required parameter 'account_id' is set",
"= self.get_profile_by_account_id_with_http_info(account_id, **kwargs) return data def get_profile_by_account_id_with_http_info(self, account_id, **kwargs): \"\"\" Returns a collection",
"express or implied. See the License for the specific language governing permissions and",
"None if 'request' in params: body_params = params['request'] # HTTP header `Accept` header_params['Accept']",
"def get_profile(self, profile_id, **kwargs): \"\"\" Returns a single profile, specified by the ID",
"all_params: raise TypeError( \"Got an unexpected keyword argument '%s'\" \" to method get_profile_by_email_address\"",
"is called asynchronously, returns the request thread. \"\"\" all_params = ['email', 'organizations', 'offset',",
"int offset: The offset from the first profile to return. :param int records:",
"val del params['kwargs'] # verify the required parameter 'profile_id' is set if ('profile_id'",
"config.api_client def get_all_profiles(self, **kwargs): \"\"\" Returns a collection of all profiles. By default",
"all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in",
"to restrict the scope of API calls. :param int offset: The offset from",
"= params['include_retired'] header_params = {} form_params = [] local_var_files = {} body_params =",
"val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( \"Got an unexpected",
"or (params['account_id'] is None): raise ValueError(\"Missing the required parameter `account_id` when calling `get_profile_by_account_id`\")",
"{\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method makes a synchronous HTTP request by default. To",
"\"Got an unexpected keyword argument '%s'\" \" to method get_profile\" % key )",
"the request thread. \"\"\" all_params = ['request'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for",
"function: The callback function for asynchronous request. (optional) :param str profile_id: ID of",
"profile. (required) :param list[str] organizations: A list of organizations used to restrict the",
"verify the required parameter 'account_id' is set if ('account_id' not in params) or",
"str order_by: Specify a field used to order the result set. :param str",
"key not in all_params: raise TypeError( \"Got an unexpected keyword argument '%s'\" \"",
"'/profiles/email/{email}'.replace('{format}', 'json') path_params = {} if 'email' in params: path_params['email'] = params['email'] query_params",
"BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See",
"update_profile(self, request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method makes",
"not in params) or (params['request'] is None): raise ValueError(\"Missing the required parameter `request`",
"**kwargs) return data def get_profile_by_email_address_with_http_info(self, email, **kwargs): \"\"\" Returns a single profile, specified",
"path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile(self, profile_id,",
"By default 10 values are returned. Records are returned in natural order {\\\"nickname\\\":\\\"Retrieve",
":param callback function: The callback function for asynchronous request. (optional) :param str account_id:",
"limitations under the License. \"\"\" from __future__ import absolute_import import sys import os",
"an unexpected keyword argument '%s'\" \" to method get_all_profiles\" % key ) params[key]",
">>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_with_http_info(profile_id, callback=callback_function) :param callback",
"by default. To make an asynchronous HTTP request, please define a `callback` function",
"response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_profile_with_http_info(request, callback=callback_function) :param",
"= api.get_all_profiles(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param",
"scope of API calls. :param int offset: The offset from the first profile",
"for asynchronous request. (optional) :param list[str] organizations: A list of organizations used to",
"profiles, specified by the account-ID parameter. By default 10 values are returned. Records",
"params['order'] header_params = {} form_params = [] local_var_files = {} body_params = None",
"in params: query_params['order'] = params['order'] if 'include_retired' in params: query_params['include_retired'] = params['include_retired'] header_params",
"TypeError( \"Got an unexpected keyword argument '%s'\" \" to method update_profile\" % key",
"keyword argument '%s'\" \" to method get_profile_by_account_id\" % key ) params[key] = val",
"method update_profile\" % key ) params[key] = val del params['kwargs'] # verify the",
"return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'))",
"{} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['application/json']) if",
"e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"} This method makes a synchronous HTTP request by default. To make an",
"receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile(profile_id,",
"`email` when calling `get_profile_by_email_address`\") resource_path = '/profiles/email/{email}'.replace('{format}', 'json') path_params = {} if 'email'",
">>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_profile(request, callback=callback_function) :param callback",
"callback function for asynchronous request. (optional) :param str profile_id: ID of the Profile.",
"in natural order {\\\"nickname\\\":\\\"Get all profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"} This method makes a synchronous HTTP request",
"asynchronously, returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_email_address_with_http_info(email,",
"not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['text/plain']) #",
"receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_email_address_with_http_info(email,",
"method get_profile_by_account_id\" % key ) params[key] = val del params['kwargs'] # verify the",
"'%s'\" \" to method get_all_profiles\" % key ) params[key] = val del params['kwargs']",
"def get_profile_with_http_info(self, profile_id, **kwargs): \"\"\" Returns a single profile, specified by the ID",
"the required parameter 'request' is set if ('request' not in params) or (params['request']",
"synchronous HTTP request by default. To make an asynchronous HTTP request, please define",
"in natural order {\\\"nickname\\\":\\\"Retrieve by account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"} This method makes a synchronous HTTP request",
"def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_account_id(account_id, callback=callback_function) :param callback function:",
"'order' in params: query_params['order'] = params['order'] if 'include_retired' in params: query_params['include_retired'] = params['include_retired']",
"verify the required parameter 'email' is set if ('email' not in params) or",
"thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_email_address_with_http_info(email, **kwargs) else: (data) =",
"\"\"\" def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else:",
"get_profile_by_email_address_with_http_info(self, email, **kwargs): \"\"\" Returns a single profile, specified by the email parameter.",
"{} if 'account_id' in params: path_params['account-ID'] = params['account_id'] query_params = {} if 'organizations'",
"params['organizations'] if 'offset' in params: query_params['offset'] = params['offset'] if 'records' in params: query_params['records']",
"header_params['Accept'] = self.api_client.\\ select_header_accept(['text/xml', 'application/xml', 'application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP",
"**kwargs) return data def get_profile_with_http_info(self, profile_id, **kwargs): \"\"\" Returns a single profile, specified",
"Configuration from ..api_client import ApiClient class ProfilesApi(object): \"\"\" NOTE: This class is auto",
"params[key] = val del params['kwargs'] # verify the required parameter 'email' is set",
"def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function) :param callback function:",
"kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_profile_with_http_info(request, **kwargs) else: (data) = self.update_profile_with_http_info(request, **kwargs)",
"the first profile to return. :param int records: The maximum number of profiles",
"the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_account_id(account_id, callback=callback_function)",
"params['request'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['text/xml', 'application/xml', 'application/json']) if not",
"{} if 'profile_id' in params: path_params['profile-ID'] = params['profile_id'] query_params = {} if 'organizations'",
"unexpected keyword argument '%s'\" \" to method get_profile\" % key ) params[key] =",
"params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not in",
"is set if ('profile_id' not in params) or (params['profile_id'] is None): raise ValueError(\"Missing",
"(optional) :param str profile_id: ID of the Profile. (required) :param list[str] organizations: A",
") params[key] = val del params['kwargs'] # verify the required parameter 'email' is",
"'request' in params: body_params = params['request'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\\",
"= val del params['kwargs'] resource_path = '/profiles'.replace('{format}', 'json') path_params = {} query_params =",
"all_params: raise TypeError( \"Got an unexpected keyword argument '%s'\" \" to method get_profile_by_account_id\"",
"= api.get_profile_by_email_address_with_http_info(email, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional)",
"self.update_profile_with_http_info(request, **kwargs) return data def update_profile_with_http_info(self, request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update",
"organizations used to restrict the scope of API calls. :param int offset: The",
"['account_id', 'organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key,",
"resource_path = '/profiles/account/{account-ID}'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['account-ID'] =",
"= {} if 'profile_id' in params: path_params['profile-ID'] = params['profile_id'] query_params = {} if",
"calling `update_profile`\") resource_path = '/profiles'.replace('{format}', 'json') path_params = {} query_params = {} header_params",
"('profile_id' not in params) or (params['profile_id'] is None): raise ValueError(\"Missing the required parameter",
"HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['application/json']) # Authentication setting auth_settings = []",
"header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type([]) # Authentication",
"of profiles to return. :param str order_by: Specify a field used to order",
"IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"utf-8 \"\"\" BillForward REST API OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed",
"to return. :param int records: The maximum number of profiles to return. :param",
">>> >>> thread = api.get_profile(profile_id, callback=callback_function) :param callback function: The callback function for",
"in compliance with the License. You may obtain a copy of the License",
"KIND, either express or implied. See the License for the specific language governing",
"the request thread. \"\"\" all_params = ['account_id', 'organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback')",
"writing, software distributed under the License is distributed on an \"AS IS\" BASIS,",
"True if kwargs.get('callback'): return self.get_profile_with_http_info(profile_id, **kwargs) else: (data) = self.get_profile_with_http_info(profile_id, **kwargs) return data",
"`get_profile`\") resource_path = '/profiles/{profile-ID}'.replace('{format}', 'json') path_params = {} if 'profile_id' in params: path_params['profile-ID']",
"By default 10 values are returned. Records are returned in natural order {\\\"nickname\\\":\\\"Get",
"get_profile_with_http_info(self, profile_id, **kwargs): \"\"\" Returns a single profile, specified by the ID parameter.",
"include_retired: Whether retired profiles should be returned. :return: ProfilePagedMetadata If the method is",
"query_params['organizations'] = params['organizations'] if 'offset' in params: query_params['offset'] = params['offset'] if 'records' in",
"query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_email_address(self, email, **kwargs):",
"{} if 'organizations' in params: query_params['organizations'] = params['organizations'] header_params = {} form_params =",
"= None if 'request' in params: body_params = params['request'] # HTTP header `Accept`",
"return self.get_profile_by_email_address_with_http_info(email, **kwargs) else: (data) = self.get_profile_by_email_address_with_http_info(email, **kwargs) return data def get_profile_by_email_address_with_http_info(self, email,",
"not in params) or (params['account_id'] is None): raise ValueError(\"Missing the required parameter `account_id`",
"Records are returned in natural order {\\\"nickname\\\":\\\"Get all profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"} This method makes a",
"callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_email_address(email, callback=callback_function) :param callback function: The",
"if 'order' in params: query_params['order'] = params['order'] if 'include_retired' in params: query_params['include_retired'] =",
"request by default. To make an asynchronous HTTP request, please define a `callback`",
"get_profile_by_account_id_with_http_info(self, account_id, **kwargs): \"\"\" Returns a collection of profiles, specified by the account-ID",
"'request' is set if ('request' not in params) or (params['request'] is None): raise",
"order the result set. :param str order: Ihe direction of any ordering, either",
"verify the required parameter 'request' is set if ('request' not in params) or",
"callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_account_id(account_id, callback=callback_function) :param callback function: The",
"setting auth_settings = [] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files,",
"OF ANY KIND, either express or implied. See the License for the specific",
"the Profile. (required) :param list[str] organizations: A list of organization-IDs used to restrict",
"path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_email_address(self, email,",
"query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] if 'offset' in",
"params['kwargs'] # verify the required parameter 'request' is set if ('request' not in",
"thread. \"\"\" all_params = ['profile_id', 'organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key,",
"'organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key",
"the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all_profiles(callback=callback_function) :param",
"in params) or (params['profile_id'] is None): raise ValueError(\"Missing the required parameter `profile_id` when",
":param callback function: The callback function for asynchronous request. (optional) :param str profile_id:",
"files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile(self, profile_id, **kwargs): \"\"\" Returns a single",
"config.api_client = ApiClient() self.api_client = config.api_client def get_all_profiles(self, **kwargs): \"\"\" Returns a collection",
"all_params: raise TypeError( \"Got an unexpected keyword argument '%s'\" \" to method get_profile\"",
"query_params['order_by'] = params['order_by'] if 'order' in params: query_params['order'] = params['order'] if 'include_retired' in",
"if 'request' in params: body_params = params['request'] # HTTP header `Accept` header_params['Accept'] =",
"to return. :param str order_by: Specify a field used to order the result",
"('account_id' not in params) or (params['account_id'] is None): raise ValueError(\"Missing the required parameter",
"Profile. (required) :param list[str] organizations: A list of organization-IDs used to restrict the",
"request. (optional) :param str email: The email address of the profile. (required) :param",
"calling `get_profile`\") resource_path = '/profiles/{profile-ID}'.replace('{format}', 'json') path_params = {} if 'profile_id' in params:",
"natural order {\\\"nickname\\\":\\\"Retrieve by account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"} This method makes a synchronous HTTP request by",
"See the License for the specific language governing permissions and limitations under the",
"method get_profile\" % key ) params[key] = val del params['kwargs'] # verify the",
"OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version",
"'include_retired'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key",
"= params['order'] header_params = {} form_params = [] local_var_files = {} body_params =",
"and limitations under the License. \"\"\" from __future__ import absolute_import import sys import",
"form_params = [] local_var_files = {} body_params = None if 'request' in params:",
"number of profiles to return. :param str order_by: Specify a field used to",
"= api.get_profile_with_http_info(profile_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional)",
"direction of any ordering, either ASC or DESC. :param bool include_retired: Whether retired",
"a field used to order the result set. :param str order: Ihe direction",
"'account_id' is set if ('account_id' not in params) or (params['account_id'] is None): raise",
"The maximum number of profiles to return. :param str order_by: Specify a field",
"val del params['kwargs'] # verify the required parameter 'email' is set if ('email'",
"\"License\"); you may not use this file except in compliance with the License.",
"= '/profiles'.replace('{format}', 'json') path_params = {} query_params = {} if 'organizations' in params:",
"is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen \"\"\" def __init__(self, api_client=None):",
"function: The callback function for asynchronous request. (optional) :param str email: The email",
"agreed to in writing, software distributed under the License is distributed on an",
"header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type([]) # Authentication setting auth_settings = [] return",
"of profiles, specified by the account-ID parameter. By default 10 values are returned.",
"method is called asynchronously, returns the request thread. \"\"\" all_params = ['request'] all_params.append('callback')",
":param str order: Ihe direction of any ordering, either ASC or DESC. :return:",
"for asynchronous request. (optional) :param str email: The email address of the profile.",
"def get_profile_by_email_address_with_http_info(self, email, **kwargs): \"\"\" Returns a single profile, specified by the email",
"is None): raise ValueError(\"Missing the required parameter `profile_id` when calling `get_profile`\") resource_path =",
"api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param",
"params: body_params = params['request'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['text/xml', 'application/xml',",
"implied. See the License for the specific language governing permissions and limitations under",
"select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\",
"= self.update_profile_with_http_info(request, **kwargs) return data def update_profile_with_http_info(self, request, **kwargs): \"\"\" Update a profile",
"by the ID parameter. {\\\"nickname\\\":\\\"Retrieve an existing profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"} This method makes a synchronous",
"REST API OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache",
"query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile(self, profile_id, **kwargs):",
"'profile_id' is set if ('profile_id' not in params) or (params['profile_id'] is None): raise",
"all_params = ['account_id', 'organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals()",
"should be returned. :return: ProfilePagedMetadata If the method is called asynchronously, returns the",
"function for asynchronous request. (optional) :param str account_id: The account-ID of the profile.",
"# verify the required parameter 'profile_id' is set if ('profile_id' not in params)",
"of any ordering, either ASC or DESC. :param bool include_retired: Whether retired profiles",
"data def get_profile_by_account_id_with_http_info(self, account_id, **kwargs): \"\"\" Returns a collection of profiles, specified by",
"the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_email_address_with_http_info(email, **kwargs) else:",
"**kwargs) else: (data) = self.get_profile_by_email_address_with_http_info(email, **kwargs) return data def get_profile_by_email_address_with_http_info(self, email, **kwargs): \"\"\"",
">>> >>> thread = api.get_profile_by_email_address_with_http_info(email, callback=callback_function) :param callback function: The callback function for",
"HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['text/xml', 'application/xml', 'application/json']) if not header_params['Accept']: del",
"True if kwargs.get('callback'): return self.get_all_profiles_with_http_info(**kwargs) else: (data) = self.get_all_profiles_with_http_info(**kwargs) return data def get_all_profiles_with_http_info(self,",
"verify the required parameter 'profile_id' is set if ('profile_id' not in params) or",
"= val del params['kwargs'] # verify the required parameter 'profile_id' is set if",
"ID parameter. {\\\"nickname\\\":\\\"Retrieve an existing profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"} This method makes a synchronous HTTP request",
") params[key] = val del params['kwargs'] # verify the required parameter 'profile_id' is",
"TypeError( \"Got an unexpected keyword argument '%s'\" \" to method get_all_profiles\" % key",
"callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all_profiles_with_http_info(callback=callback_function) :param callback function: The callback",
"required by applicable law or agreed to in writing, software distributed under the",
"__init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not",
"define a `callback` function to be invoked when receiving the response. >>> def",
"DESC. :param bool include_retired: Whether retired profiles should be returned. :return: ProfilePagedMetadata If",
"callback function for asynchronous request. (optional) :param str account_id: The account-ID of the",
"all_params: raise TypeError( \"Got an unexpected keyword argument '%s'\" \" to method get_all_profiles\"",
"params) or (params['request'] is None): raise ValueError(\"Missing the required parameter `request` when calling",
"in params) or (params['email'] is None): raise ValueError(\"Missing the required parameter `email` when",
"the method is called asynchronously, returns the request thread. \"\"\" all_params = ['organizations',",
"# verify the required parameter 'request' is set if ('request' not in params)",
"'order_by', 'order', 'include_retired'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']):",
"offset from the first profile to return. :param int records: The maximum number",
"by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the \"License\"); you may",
"a single profile, specified by the ID parameter. {\\\"nickname\\\":\\\"Retrieve an existing profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"} This",
"= [] local_var_files = {} body_params = None if 'request' in params: body_params",
"library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient",
"if 'order_by' in params: query_params['order_by'] = params['order_by'] if 'order' in params: query_params['order'] =",
"api.get_all_profiles_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str]",
"params: query_params['order_by'] = params['order_by'] if 'order' in params: query_params['order'] = params['order'] if 'include_retired'",
"single profile, specified by the ID parameter. {\\\"nickname\\\":\\\"Retrieve an existing profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"} This method",
"set. :param str order: Ihe direction of any ordering, either ASC or DESC.",
"by e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"} This method makes a synchronous HTTP request by default. To make",
"asynchronously, returns the request thread. \"\"\" all_params = ['request'] all_params.append('callback') all_params.append('_return_http_data_only') params =",
"is set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError(\"Missing",
"account_id, **kwargs): \"\"\" Returns a collection of profiles, specified by the account-ID parameter.",
"import ApiClient class ProfilesApi(object): \"\"\" NOTE: This class is auto generated by the",
"kwargs.get('callback'): return self.get_all_profiles_with_http_info(**kwargs) else: (data) = self.get_all_profiles_with_http_info(**kwargs) return data def get_all_profiles_with_http_info(self, **kwargs): \"\"\"",
"% key ) params[key] = val del params['kwargs'] resource_path = '/profiles'.replace('{format}', 'json') path_params",
"the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_with_http_info(profile_id, **kwargs) else:",
"pprint(response) >>> >>> thread = api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function) :param callback function: The callback function",
"ANY KIND, either express or implied. See the License for the specific language",
"= ['profile_id', 'organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']):",
":param str order_by: Specify a field used to order the result set. :param",
"request: The profile object to be updated. (required) :return: ProfilePagedMetadata If the method",
"# python 2 and python 3 compatibility library from six import iteritems from",
"This class is auto generated by the swagger code generator program. Do not",
"is None): raise ValueError(\"Missing the required parameter `email` when calling `get_profile_by_email_address`\") resource_path =",
"def update_profile(self, request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method",
"return. :param int records: The maximum number of profiles to return. :param str",
"The callback function for asynchronous request. (optional) :param str profile_id: ID of the",
"set if ('email' not in params) or (params['email'] is None): raise ValueError(\"Missing the",
"the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless",
"asynchronous HTTP request, please define a `callback` function to be invoked when receiving",
"self.update_profile_with_http_info(request, **kwargs) else: (data) = self.update_profile_with_http_info(request, **kwargs) return data def update_profile_with_http_info(self, request, **kwargs):",
"(the \"License\"); you may not use this file except in compliance with the",
"callback function for asynchronous request. (optional) :param UpdateProfileRequest request: The profile object to",
"params['order_by'] if 'order' in params: query_params['order'] = params['order'] header_params = {} form_params =",
"post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile(self, profile_id, **kwargs): \"\"\" Returns a",
"or DESC. :return: ProfilePagedMetadata If the method is called asynchronously, returns the request",
"= '/profiles'.replace('{format}', 'json') path_params = {} query_params = {} header_params = {} form_params",
"(optional) :param UpdateProfileRequest request: The profile object to be updated. (required) :return: ProfilePagedMetadata",
"kwargs.get('callback'): return self.get_profile_by_account_id_with_http_info(account_id, **kwargs) else: (data) = self.get_profile_by_account_id_with_http_info(account_id, **kwargs) return data def get_profile_by_account_id_with_http_info(self,",
"**kwargs) else: (data) = self.update_profile_with_http_info(request, **kwargs) return data def update_profile_with_http_info(self, request, **kwargs): \"\"\"",
"in params: body_params = params['request'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['text/xml',",
"callback function: The callback function for asynchronous request. (optional) :param str account_id: The",
"asynchronously, returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_account_id_with_http_info(account_id,",
"first profile to return. :param int records: The maximum number of profiles to",
"http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed",
"The offset from the first profile to return. :param int records: The maximum",
"License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF",
"in params: query_params['order_by'] = params['order_by'] if 'order' in params: query_params['order'] = params['order'] header_params",
"self.api_client.\\ select_header_content_type([]) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params,",
"'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in",
"kwargs.get('callback'): return self.update_profile_with_http_info(request, **kwargs) else: (data) = self.update_profile_with_http_info(request, **kwargs) return data def update_profile_with_http_info(self,",
"params: path_params['profile-ID'] = params['profile_id'] query_params = {} if 'organizations' in params: query_params['organizations'] =",
"asynchronously, returns the request thread. \"\"\" all_params = ['email', 'organizations', 'offset', 'records', 'order_by',",
"= config.api_client def get_all_profiles(self, **kwargs): \"\"\" Returns a collection of all profiles. By",
"callback function for asynchronous request. (optional) :param list[str] organizations: A list of organizations",
"unexpected keyword argument '%s'\" \" to method get_all_profiles\" % key ) params[key] =",
"api.get_all_profiles(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param list[str]",
"'records' in params: query_params['records'] = params['records'] if 'order_by' in params: query_params['order_by'] = params['order_by']",
"either express or implied. See the License for the specific language governing permissions",
"query_params['records'] = params['records'] if 'order_by' in params: query_params['order_by'] = params['order_by'] if 'order' in",
"query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] header_params = {}",
"**kwargs) return data def update_profile_with_http_info(self, request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update a",
"Apache License, Version 2.0 (the \"License\"); you may not use this file except",
"NOTE: This class is auto generated by the swagger code generator program. Do",
"**kwargs) return data def get_profile_by_account_id_with_http_info(self, account_id, **kwargs): \"\"\" Returns a collection of profiles,",
"values are returned. Records are returned in natural order {\\\"nickname\\\":\\\"Get all profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"} This",
"'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_account_id(self,",
"path_params = {} if 'email' in params: path_params['email'] = params['email'] query_params = {}",
"def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all_profiles_with_http_info(callback=callback_function) :param callback function: The",
"= {} form_params = [] local_var_files = {} body_params = None if 'request'",
"if kwargs.get('callback'): return self.get_profile_with_http_info(profile_id, **kwargs) else: (data) = self.get_profile_with_http_info(profile_id, **kwargs) return data def",
"None # HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['application/json']) if not header_params['Accept']: del",
"to in writing, software distributed under the License is distributed on an \"AS",
"('email' not in params) or (params['email'] is None): raise ValueError(\"Missing the required parameter",
"\" to method get_profile\" % key ) params[key] = val del params['kwargs'] #",
"argument '%s'\" \" to method get_profile_by_account_id\" % key ) params[key] = val del",
"parameter 'profile_id' is set if ('profile_id' not in params) or (params['profile_id'] is None):",
">>> thread = api.get_profile_by_account_id(account_id, callback=callback_function) :param callback function: The callback function for asynchronous",
"def update_profile_with_http_info(self, request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method",
"resource_path = '/profiles/email/{email}'.replace('{format}', 'json') path_params = {} if 'email' in params: path_params['email'] =",
"= ['request'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if",
":param UpdateProfileRequest request: The profile object to be updated. (required) :return: ProfilePagedMetadata If",
"self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client",
">>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_email_address_with_http_info(email, callback=callback_function) :param callback",
"\"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_profile_with_http_info(request, **kwargs) else: (data) = self.update_profile_with_http_info(request,",
"ID of the Profile. (required) :param list[str] organizations: A list of organization-IDs used",
"profile, specified by the ID parameter. {\\\"nickname\\\":\\\"Retrieve an existing profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"} This method makes",
"unexpected keyword argument '%s'\" \" to method update_profile\" % key ) params[key] =",
"(required) :param list[str] organizations: A list of organizations used to restrict the scope",
"{} header_params = {} form_params = [] local_var_files = {} body_params = None",
"pprint(response) >>> >>> thread = api.get_profile_by_account_id(account_id, callback=callback_function) :param callback function: The callback function",
"method makes a synchronous HTTP request by default. To make an asynchronous HTTP",
"in params: query_params['include_retired'] = params['include_retired'] header_params = {} form_params = [] local_var_files =",
"order: Ihe direction of any ordering, either ASC or DESC. :return: ProfilePagedMetadata If",
"`Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header",
"of organization-IDs used to restrict the scope of API calls. :return: ProfilePagedMetadata If",
"local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\\",
"if 'order' in params: query_params['order'] = params['order'] header_params = {} form_params = []",
">>> >>> thread = api.get_profile_by_account_id(account_id, callback=callback_function) :param callback function: The callback function for",
"method is called asynchronously, returns the request thread. \"\"\" all_params = ['email', 'organizations',",
"= api.get_profile_by_account_id(account_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional)",
"asynchronously, returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_profile_with_http_info(request,",
"= self.api_client.\\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'PUT', path_params,",
"thread = api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function) :param callback function: The callback function for asynchronous request.",
"sys import os import re # python 2 and python 3 compatibility library",
"str account_id: The account-ID of the profile. (required) :param list[str] organizations: A list",
"updated. (required) :return: ProfilePagedMetadata If the method is called asynchronously, returns the request",
"header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'PUT',",
"invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread",
"return data def get_profile_by_account_id_with_http_info(self, account_id, **kwargs): \"\"\" Returns a collection of profiles, specified",
"'application/xml', 'application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] =",
"Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen \"\"\" def __init__(self, api_client=None): config",
"{} if 'organizations' in params: query_params['organizations'] = params['organizations'] if 'offset' in params: query_params['offset']",
"'%s'\" \" to method get_profile_by_account_id\" % key ) params[key] = val del params['kwargs']",
"raise TypeError( \"Got an unexpected keyword argument '%s'\" \" to method get_all_profiles\" %",
"= {} if 'email' in params: path_params['email'] = params['email'] query_params = {} if",
"in iteritems(params['kwargs']): if key not in all_params: raise TypeError( \"Got an unexpected keyword",
"(params['request'] is None): raise ValueError(\"Missing the required parameter `request` when calling `update_profile`\") resource_path",
">>> pprint(response) >>> >>> thread = api.get_profile_by_email_address(email, callback=callback_function) :param callback function: The callback",
"params: query_params['include_retired'] = params['include_retired'] header_params = {} form_params = [] local_var_files = {}",
"\"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_account_id_with_http_info(account_id, **kwargs) else: (data) = self.get_profile_by_account_id_with_http_info(account_id,",
"else: (data) = self.get_all_profiles_with_http_info(**kwargs) return data def get_all_profiles_with_http_info(self, **kwargs): \"\"\" Returns a collection",
"True if kwargs.get('callback'): return self.update_profile_with_http_info(request, **kwargs) else: (data) = self.update_profile_with_http_info(request, **kwargs) return data",
">>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile(profile_id, callback=callback_function) :param callback",
"get_all_profiles_with_http_info(self, **kwargs): \"\"\" Returns a collection of all profiles. By default 10 values",
"profile_id: ID of the Profile. (required) :param list[str] organizations: A list of organization-IDs",
"callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_account_id(self, account_id, **kwargs): \"\"\" Returns a collection of profiles, specified",
"pprint(response) >>> >>> thread = api.get_profile_with_http_info(profile_id, callback=callback_function) :param callback function: The callback function",
"organizations: A list of organizations used to restrict the scope of API calls.",
"= {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['application/json'])",
"header_params['Content-Type'] = self.api_client.\\ select_header_content_type([]) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET',",
"are returned in natural order {\\\"nickname\\\":\\\"Get all profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"} This method makes a synchronous",
"https://github.com/swagger-api/swagger-codegen \"\"\" def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client",
"list[str] organizations: A list of organizations used to restrict the scope of API",
"in params: query_params['records'] = params['records'] if 'order_by' in params: query_params['order_by'] = params['order_by'] if",
"header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['text/plain']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET',",
"= self.api_client.\\ select_header_content_type(['text/plain']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params,",
"at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software",
"in params: query_params['organizations'] = params['organizations'] if 'offset' in params: query_params['offset'] = params['offset'] if",
"else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def get_all_profiles(self, **kwargs):",
"returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_with_http_info(profile_id, **kwargs)",
"params: path_params['account-ID'] = params['account_id'] query_params = {} if 'organizations' in params: query_params['organizations'] =",
"\"\"\" Returns a collection of profiles, specified by the account-ID parameter. By default",
":param str order: Ihe direction of any ordering, either ASC or DESC. :param",
"params['order_by'] if 'order' in params: query_params['order'] = params['order'] if 'include_retired' in params: query_params['include_retired']",
"returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_all_profiles_with_http_info(**kwargs) else:",
"object to be updated. (required) :return: ProfilePagedMetadata If the method is called asynchronously,",
"files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_account_id(self, account_id, **kwargs): \"\"\" Returns a collection",
"= params['order'] if 'include_retired' in params: query_params['include_retired'] = params['include_retired'] header_params = {} form_params",
"= self.get_profile_with_http_info(profile_id, **kwargs) return data def get_profile_with_http_info(self, profile_id, **kwargs): \"\"\" Returns a single",
"import iteritems from ..configuration import Configuration from ..api_client import ApiClient class ProfilesApi(object): \"\"\"",
"= api.update_profile(request, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional)",
"all_params = ['request'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']):",
"governing permissions and limitations under the License. \"\"\" from __future__ import absolute_import import",
"the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all_profiles_with_http_info(callback=callback_function) :param",
"if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type([])",
"set if ('account_id' not in params) or (params['account_id'] is None): raise ValueError(\"Missing the",
"callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile(profile_id, callback=callback_function) :param callback function: The",
"the License for the specific language governing permissions and limitations under the License.",
"'offset', 'records', 'order_by', 'order', 'include_retired'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val",
"when calling `get_profile_by_email_address`\") resource_path = '/profiles/email/{email}'.replace('{format}', 'json') path_params = {} if 'email' in",
"default. To make an asynchronous HTTP request, please define a `callback` function to",
"API calls. :return: ProfilePagedMetadata If the method is called asynchronously, returns the request",
"= params['request'] # HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['text/xml', 'application/xml', 'application/json']) if",
"self.get_profile_by_email_address_with_http_info(email, **kwargs) return data def get_profile_by_email_address_with_http_info(self, email, **kwargs): \"\"\" Returns a single profile,",
"None): raise ValueError(\"Missing the required parameter `email` when calling `get_profile_by_email_address`\") resource_path = '/profiles/email/{email}'.replace('{format}',",
"auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata',",
"['email', 'organizations', 'offset', 'records', 'order_by', 'order', 'include_retired'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for",
">>> thread = api.get_profile_with_http_info(profile_id, callback=callback_function) :param callback function: The callback function for asynchronous",
"\"\"\" Returns a single profile, specified by the email parameter. {\\\"nickname\\\":\\\"Retrieve by e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"}",
"returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_account_id_with_http_info(account_id, **kwargs)",
"to order the result set. :param str order: Ihe direction of any ordering,",
"auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile(self, profile_id, **kwargs): \"\"\" Returns a single profile, specified",
"be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>>",
"License, Version 2.0 (the \"License\"); you may not use this file except in",
"(optional) :param str account_id: The account-ID of the profile. (required) :param list[str] organizations:",
"data def get_all_profiles_with_http_info(self, **kwargs): \"\"\" Returns a collection of all profiles. By default",
"'organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val",
"called asynchronously, returns the request thread. \"\"\" all_params = ['organizations', 'offset', 'records', 'order_by',",
"ValueError(\"Missing the required parameter `profile_id` when calling `get_profile`\") resource_path = '/profiles/{profile-ID}'.replace('{format}', 'json') path_params",
"\"Got an unexpected keyword argument '%s'\" \" to method get_profile_by_account_id\" % key )",
"= {} if 'account_id' in params: path_params['account-ID'] = params['account_id'] query_params = {} if",
"else: (data) = self.get_profile_with_http_info(profile_id, **kwargs) return data def get_profile_with_http_info(self, profile_id, **kwargs): \"\"\" Returns",
"return self.get_profile_by_account_id_with_http_info(account_id, **kwargs) else: (data) = self.get_profile_by_account_id_with_http_info(account_id, **kwargs) return data def get_profile_by_account_id_with_http_info(self, account_id,",
"make an asynchronous HTTP request, please define a `callback` function to be invoked",
"self.get_profile_with_http_info(profile_id, **kwargs) else: (data) = self.get_profile_with_http_info(profile_id, **kwargs) return data def get_profile_with_http_info(self, profile_id, **kwargs):",
"# verify the required parameter 'email' is set if ('email' not in params)",
"ValueError(\"Missing the required parameter `request` when calling `update_profile`\") resource_path = '/profiles'.replace('{format}', 'json') path_params",
"key ) params[key] = val del params['kwargs'] # verify the required parameter 'request'",
"profile object to be updated. (required) :return: ProfilePagedMetadata If the method is called",
"spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0",
"email, **kwargs): \"\"\" Returns a single profile, specified by the email parameter. {\\\"nickname\\\":\\\"Retrieve",
"str order: Ihe direction of any ordering, either ASC or DESC. :return: ProfilePagedMetadata",
"profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method makes a synchronous HTTP request by default. To make an",
"or (params['request'] is None): raise ValueError(\"Missing the required parameter `request` when calling `update_profile`\")",
"params: path_params['email'] = params['email'] query_params = {} if 'organizations' in params: query_params['organizations'] =",
"the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_email_address_with_http_info(email, callback=callback_function)",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License",
"'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if",
"`Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['text/plain']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path,",
"specified by the email parameter. {\\\"nickname\\\":\\\"Retrieve by e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"} This method makes a synchronous",
"from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class",
"values are returned. Records are returned in natural order {\\\"nickname\\\":\\\"Retrieve by account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"} This",
"params['organizations'] header_params = {} form_params = [] local_var_files = {} body_params = None",
"val del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id'",
">>> thread = api.update_profile(request, callback=callback_function) :param callback function: The callback function for asynchronous",
"parameter 'account_id' is set if ('account_id' not in params) or (params['account_id'] is None):",
"True if kwargs.get('callback'): return self.get_profile_by_account_id_with_http_info(account_id, **kwargs) else: (data) = self.get_profile_by_account_id_with_http_info(account_id, **kwargs) return data",
"header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return",
"list[str] organizations: A list of organization-IDs used to restrict the scope of API",
"del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['application/json']) # Authentication setting",
"= val del params['kwargs'] # verify the required parameter 'account_id' is set if",
"the result set. :param str order: Ihe direction of any ordering, either ASC",
"raise TypeError( \"Got an unexpected keyword argument '%s'\" \" to method get_profile\" %",
"api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def get_all_profiles(self,",
"keyword argument '%s'\" \" to method get_profile\" % key ) params[key] = val",
"profiles should be returned. :return: ProfilePagedMetadata If the method is called asynchronously, returns",
"select_header_content_type(['application/json']) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params,",
"all_params = ['organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for",
"self.get_profile_by_email_address_with_http_info(email, **kwargs) else: (data) = self.get_profile_by_email_address_with_http_info(email, **kwargs) return data def get_profile_by_email_address_with_http_info(self, email, **kwargs):",
"used to restrict the scope of API calls. :return: ProfilePagedMetadata If the method",
">>> >>> thread = api.get_all_profiles(callback=callback_function) :param callback function: The callback function for asynchronous",
"manually. Ref: https://github.com/swagger-api/swagger-codegen \"\"\" def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client",
">>> pprint(response) >>> >>> thread = api.get_profile_by_email_address_with_http_info(email, callback=callback_function) :param callback function: The callback",
"None): raise ValueError(\"Missing the required parameter `request` when calling `update_profile`\") resource_path = '/profiles'.replace('{format}',",
"be updated. (required) :return: ProfilePagedMetadata If the method is called asynchronously, returns the",
"update_profile_with_http_info(self, request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method makes",
"single profile, specified by the email parameter. {\\\"nickname\\\":\\\"Retrieve by e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"} This method makes",
">>> >>> thread = api.get_profile_by_email_address(email, callback=callback_function) :param callback function: The callback function for",
"an asynchronous HTTP request, please define a `callback` function to be invoked when",
"an existing profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"} This method makes a synchronous HTTP request by default. To",
"in all_params: raise TypeError( \"Got an unexpected keyword argument '%s'\" \" to method",
"`get_profile_by_email_address`\") resource_path = '/profiles/email/{email}'.replace('{format}', 'json') path_params = {} if 'email' in params: path_params['email']",
"\"\"\" from __future__ import absolute_import import sys import os import re # python",
"self.get_profile_with_http_info(profile_id, **kwargs) return data def get_profile_with_http_info(self, profile_id, **kwargs): \"\"\" Returns a single profile,",
"params['email'] query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] if 'offset'",
"\"\"\" Returns a collection of all profiles. By default 10 values are returned.",
"coding: utf-8 \"\"\" BillForward REST API OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git",
"del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type([]) # Authentication setting",
"specified by the ID parameter. {\\\"nickname\\\":\\\"Retrieve an existing profile\\\",\\\"response\\\":\\\"getProfileByID.html\\\"} This method makes a",
"callback function: The callback function for asynchronous request. (optional) :param str email: The",
"on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"str profile_id: ID of the Profile. (required) :param list[str] organizations: A list of",
"the License. \"\"\" from __future__ import absolute_import import sys import os import re",
"language governing permissions and limitations under the License. \"\"\" from __future__ import absolute_import",
"the request thread. \"\"\" all_params = ['email', 'organizations', 'offset', 'records', 'order_by', 'order', 'include_retired']",
"header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def update_profile(self, request, **kwargs): \"\"\"",
"pprint(response) >>> >>> thread = api.get_all_profiles_with_http_info(callback=callback_function) :param callback function: The callback function for",
"'json') path_params = {} query_params = {} if 'organizations' in params: query_params['organizations'] =",
"of API calls. :return: ProfilePagedMetadata If the method is called asynchronously, returns the",
"retired profiles should be returned. :return: ProfilePagedMetadata If the method is called asynchronously,",
"calls. :param int offset: The offset from the first profile to return. :param",
"params: query_params['records'] = params['records'] if 'order_by' in params: query_params['order_by'] = params['order_by'] if 'order'",
">>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_profile_with_http_info(request, callback=callback_function) :param callback",
"\"\"\" all_params = ['profile_id', 'organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val",
"To make an asynchronous HTTP request, please define a `callback` function to be",
"returns the request thread. \"\"\" all_params = ['request'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals()",
"specified by the account-ID parameter. By default 10 values are returned. Records are",
"request thread. \"\"\" all_params = ['email', 'organizations', 'offset', 'records', 'order_by', 'order', 'include_retired'] all_params.append('callback')",
"def get_all_profiles_with_http_info(self, **kwargs): \"\"\" Returns a collection of all profiles. By default 10",
"a collection of profiles, specified by the account-ID parameter. By default 10 values",
"_return_http_data_only=params.get('_return_http_data_only')) def get_profile(self, profile_id, **kwargs): \"\"\" Returns a single profile, specified by the",
"thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_all_profiles_with_http_info(**kwargs) else: (data) = self.get_all_profiles_with_http_info(**kwargs)",
"None): raise ValueError(\"Missing the required parameter `account_id` when calling `get_profile_by_account_id`\") resource_path = '/profiles/account/{account-ID}'.replace('{format}',",
"receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_with_http_info(profile_id,",
">>> pprint(response) >>> >>> thread = api.get_all_profiles_with_http_info(callback=callback_function) :param callback function: The callback function",
"all_params = ['profile_id', 'organizations'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in",
"# verify the required parameter 'account_id' is set if ('account_id' not in params)",
"all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not",
"UpdateProfileRequest request: The profile object to be updated. (required) :return: ProfilePagedMetadata If the",
"`account_id` when calling `get_profile_by_account_id`\") resource_path = '/profiles/account/{account-ID}'.replace('{format}', 'json') path_params = {} if 'account_id'",
"= params['organizations'] header_params = {} form_params = [] local_var_files = {} body_params =",
"**kwargs) else: (data) = self.get_profile_by_account_id_with_http_info(account_id, **kwargs) return data def get_profile_by_account_id_with_http_info(self, account_id, **kwargs): \"\"\"",
"header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP",
"the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_with_http_info(profile_id, callback=callback_function)",
"distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES",
"import sys import os import re # python 2 and python 3 compatibility",
"returns the request thread. \"\"\" all_params = ['email', 'organizations', 'offset', 'records', 'order_by', 'order',",
"when calling `update_profile`\") resource_path = '/profiles'.replace('{format}', 'json') path_params = {} query_params = {}",
">>> >>> thread = api.get_profile_with_http_info(profile_id, callback=callback_function) :param callback function: The callback function for",
"def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all_profiles(callback=callback_function) :param callback function: The",
"'json') path_params = {} if 'email' in params: path_params['email'] = params['email'] query_params =",
"callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str",
"the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function)",
"calling `get_profile_by_account_id`\") resource_path = '/profiles/account/{account-ID}'.replace('{format}', 'json') path_params = {} if 'account_id' in params:",
"query_params = {} header_params = {} form_params = [] local_var_files = {} body_params",
"request, please define a `callback` function to be invoked when receiving the response.",
"path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_account_id(self, account_id,",
"'email' is set if ('email' not in params) or (params['email'] is None): raise",
"email address of the profile. (required) :param list[str] organizations: A list of organizations",
"files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def update_profile(self, request, **kwargs): \"\"\" Update a profile",
"% key ) params[key] = val del params['kwargs'] # verify the required parameter",
"key ) params[key] = val del params['kwargs'] # verify the required parameter 'account_id'",
"use this file except in compliance with the License. You may obtain a",
"HTTP request by default. To make an asynchronous HTTP request, please define a",
"not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen \"\"\" def __init__(self, api_client=None): config =",
"ASC or DESC. :return: ProfilePagedMetadata If the method is called asynchronously, returns the",
"params: query_params['order_by'] = params['order_by'] if 'order' in params: query_params['order'] = params['order'] header_params =",
"= {} query_params = {} if 'organizations' in params: query_params['organizations'] = params['organizations'] if",
"thread. \"\"\" all_params = ['organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params =",
"params: query_params['order'] = params['order'] if 'include_retired' in params: query_params['include_retired'] = params['include_retired'] header_params =",
"a `callback` function to be invoked when receiving the response. >>> def callback_function(response):",
"a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or",
"`Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type([]) # Authentication setting auth_settings = [] return self.api_client.call_api(resource_path,",
"callback function for asynchronous request. (optional) :param str email: The email address of",
"the email parameter. {\\\"nickname\\\":\\\"Retrieve by e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"} This method makes a synchronous HTTP request",
"api.get_profile_by_email_address_with_http_info(email, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param",
"the required parameter `profile_id` when calling `get_profile`\") resource_path = '/profiles/{profile-ID}'.replace('{format}', 'json') path_params =",
"import os import re # python 2 and python 3 compatibility library from",
"keyword argument '%s'\" \" to method get_all_profiles\" % key ) params[key] = val",
"Whether retired profiles should be returned. :return: ProfilePagedMetadata If the method is called",
"= val del params['kwargs'] # verify the required parameter 'request' is set if",
"= {} if 'organizations' in params: query_params['organizations'] = params['organizations'] header_params = {} form_params",
"auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def update_profile(self, request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update a",
"'order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key",
"self.get_profile_by_account_id_with_http_info(account_id, **kwargs) else: (data) = self.get_profile_by_account_id_with_http_info(account_id, **kwargs) return data def get_profile_by_account_id_with_http_info(self, account_id, **kwargs):",
">>> >>> thread = api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function) :param callback function: The callback function for",
"returned in natural order {\\\"nickname\\\":\\\"Get all profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"} This method makes a synchronous HTTP",
"HTTP header `Accept` header_params['Accept'] = self.api_client.\\ select_header_accept(['application/json']) if not header_params['Accept']: del header_params['Accept'] #",
"request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_with_http_info(profile_id, **kwargs) else: (data)",
">>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all_profiles_with_http_info(callback=callback_function) :param callback function:",
"method is called asynchronously, returns the request thread. \"\"\" all_params = ['profile_id', 'organizations']",
"get_profile_by_email_address(self, email, **kwargs): \"\"\" Returns a single profile, specified by the email parameter.",
"distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"(optional) :param str email: The email address of the profile. (required) :param list[str]",
"post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_email_address(self, email, **kwargs): \"\"\" Returns a",
"profile to return. :param int records: The maximum number of profiles to return.",
"header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_account_id(self, account_id, **kwargs): \"\"\"",
"callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_email_address(self, email, **kwargs): \"\"\" Returns a single profile, specified by",
"def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if",
"when calling `get_profile_by_account_id`\") resource_path = '/profiles/account/{account-ID}'.replace('{format}', 'json') path_params = {} if 'account_id' in",
"path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def update_profile(self, request,",
">>> thread = api.get_all_profiles(callback=callback_function) :param callback function: The callback function for asynchronous request.",
"= ['organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key,",
"receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_all_profiles_with_http_info(callback=callback_function)",
"params[key] = val del params['kwargs'] # verify the required parameter 'request' is set",
"the method is called asynchronously, returns the request thread. \"\"\" all_params = ['account_id',",
"self.get_all_profiles_with_http_info(**kwargs) else: (data) = self.get_all_profiles_with_http_info(**kwargs) return data def get_all_profiles_with_http_info(self, **kwargs): \"\"\" Returns a",
"'json') path_params = {} if 'account_id' in params: path_params['account-ID'] = params['account_id'] query_params =",
"any ordering, either ASC or DESC. :param bool include_retired: Whether retired profiles should",
"raise TypeError( \"Got an unexpected keyword argument '%s'\" \" to method get_profile_by_email_address\" %",
"specific language governing permissions and limitations under the License. \"\"\" from __future__ import",
"def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_profile(request, callback=callback_function) :param callback function:",
"'email' in params: path_params['email'] = params['email'] query_params = {} if 'organizations' in params:",
"absolute_import import sys import os import re # python 2 and python 3",
"returned. Records are returned in natural order {\\\"nickname\\\":\\\"Get all profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"} This method makes",
"return self.get_profile_with_http_info(profile_id, **kwargs) else: (data) = self.get_profile_with_http_info(profile_id, **kwargs) return data def get_profile_with_http_info(self, profile_id,",
"= '/profiles/account/{account-ID}'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['account-ID'] = params['account_id']",
"class is auto generated by the swagger code generator program. Do not edit",
":param int records: The maximum number of profiles to return. :param str order_by:",
"account-ID of the profile. (required) :param list[str] organizations: A list of organizations used",
"request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_all_profiles_with_http_info(**kwargs) else: (data) =",
"Returns a collection of all profiles. By default 10 values are returned. Records",
"header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_email_address(self, email, **kwargs): \"\"\"",
"profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"} This method makes a synchronous HTTP request by default. To make an",
"function for asynchronous request. (optional) :param str email: The email address of the",
"returned. Records are returned in natural order {\\\"nickname\\\":\\\"Retrieve by account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"} This method makes",
"order {\\\"nickname\\\":\\\"Get all profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"} This method makes a synchronous HTTP request by default.",
"If the method is called asynchronously, returns the request thread. \"\"\" kwargs['_return_http_data_only'] =",
"\"\"\" all_params = ['organizations', 'offset', 'records', 'order_by', 'order'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals()",
"locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError(",
"str order: Ihe direction of any ordering, either ASC or DESC. :param bool",
"if 'email' in params: path_params['email'] = params['email'] query_params = {} if 'organizations' in",
"parameter `profile_id` when calling `get_profile`\") resource_path = '/profiles/{profile-ID}'.replace('{format}', 'json') path_params = {} if",
"request. (optional) :param list[str] organizations: A list of organizations used to restrict the",
"response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.update_profile(request, callback=callback_function) :param",
"the Apache License, Version 2.0 (the \"License\"); you may not use this file",
"callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param UpdateProfileRequest",
"code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen \"\"\" def",
"'account_id' in params: path_params['account-ID'] = params['account_id'] query_params = {} if 'organizations' in params:",
"response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_account_id(self, account_id, **kwargs): \"\"\" Returns a collection of",
"params) or (params['email'] is None): raise ValueError(\"Missing the required parameter `email` when calling",
"iteritems from ..configuration import Configuration from ..api_client import ApiClient class ProfilesApi(object): \"\"\" NOTE:",
"request, **kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method makes a",
"{} query_params = {} header_params = {} form_params = [] local_var_files = {}",
"in params: path_params['profile-ID'] = params['profile_id'] query_params = {} if 'organizations' in params: query_params['organizations']",
"called asynchronously, returns the request thread. \"\"\" all_params = ['email', 'organizations', 'offset', 'records',",
"`get_profile_by_account_id`\") resource_path = '/profiles/account/{account-ID}'.replace('{format}', 'json') path_params = {} if 'account_id' in params: path_params['account-ID']",
"ordering, either ASC or DESC. :param bool include_retired: Whether retired profiles should be",
"key ) params[key] = val del params['kwargs'] # verify the required parameter 'profile_id'",
"[] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'),",
"a collection of all profiles. By default 10 values are returned. Records are",
"header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['application/json']) # Authentication",
"{\\\"nickname\\\":\\\"Retrieve by e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"} This method makes a synchronous HTTP request by default. To",
"api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client:",
"del params['kwargs'] # verify the required parameter 'profile_id' is set if ('profile_id' not",
"ApiClient() self.api_client = config.api_client def get_all_profiles(self, **kwargs): \"\"\" Returns a collection of all",
"calls. :return: ProfilePagedMetadata If the method is called asynchronously, returns the request thread.",
"setting auth_settings = [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files,",
"pprint(response) >>> >>> thread = api.get_profile_by_email_address(email, callback=callback_function) :param callback function: The callback function",
"generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen \"\"\" def __init__(self,",
"= api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def",
"asynchronous request. (optional) :param str account_id: The account-ID of the profile. (required) :param",
"if kwargs.get('callback'): return self.get_profile_by_email_address_with_http_info(email, **kwargs) else: (data) = self.get_profile_by_email_address_with_http_info(email, **kwargs) return data def",
"api.get_profile_by_account_id(account_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param",
"'json') path_params = {} if 'profile_id' in params: path_params['profile-ID'] = params['profile_id'] query_params =",
"[] return self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings, callback=params.get('callback'),",
"offset: The offset from the first profile to return. :param int records: The",
"by account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"} This method makes a synchronous HTTP request by default. To make",
"str email: The email address of the profile. (required) :param list[str] organizations: A",
"(data) = self.update_profile_with_http_info(request, **kwargs) return data def update_profile_with_http_info(self, request, **kwargs): \"\"\" Update a",
"of the profile. (required) :param list[str] organizations: A list of organizations used to",
"function for asynchronous request. (optional) :param UpdateProfileRequest request: The profile object to be",
"class ProfilesApi(object): \"\"\" NOTE: This class is auto generated by the swagger code",
"thread = api.get_profile(profile_id, callback=callback_function) :param callback function: The callback function for asynchronous request.",
"self.api_client = config.api_client def get_all_profiles(self, **kwargs): \"\"\" Returns a collection of all profiles.",
"if kwargs.get('callback'): return self.get_all_profiles_with_http_info(**kwargs) else: (data) = self.get_all_profiles_with_http_info(**kwargs) return data def get_all_profiles_with_http_info(self, **kwargs):",
"the required parameter 'profile_id' is set if ('profile_id' not in params) or (params['profile_id']",
"header `Content-Type` header_params['Content-Type'] = self.api_client.\\ select_header_content_type(['text/plain']) # Authentication setting auth_settings = [] return",
"method get_profile_by_email_address\" % key ) params[key] = val del params['kwargs'] # verify the",
"# coding: utf-8 \"\"\" BillForward REST API OpenAPI spec version: 1.0.0 Generated by:",
"= [] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProfilePagedMetadata', auth_settings=auth_settings,",
"callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) def get_profile(self, profile_id, **kwargs): \"\"\" Returns a single profile, specified by",
"is auto generated by the swagger code generator program. Do not edit the",
"are returned in natural order {\\\"nickname\\\":\\\"Retrieve by account\\\",\\\"response\\\":\\\"getProfileByAccountID.html\\\"} This method makes a synchronous",
"self.api_client.\\ select_header_accept(['text/xml', 'application/xml', 'application/json']) if not header_params['Accept']: del header_params['Accept'] # HTTP header `Content-Type`",
"asynchronous request. (optional) :param str email: The email address of the profile. (required)",
"keyword argument '%s'\" \" to method get_profile_by_email_address\" % key ) params[key] = val",
"parameter `email` when calling `get_profile_by_email_address`\") resource_path = '/profiles/email/{email}'.replace('{format}', 'json') path_params = {} if",
"when calling `get_profile`\") resource_path = '/profiles/{profile-ID}'.replace('{format}', 'json') path_params = {} if 'profile_id' in",
"compliance with the License. You may obtain a copy of the License at",
":param list[str] organizations: A list of organization-IDs used to restrict the scope of",
"if ('account_id' not in params) or (params['account_id'] is None): raise ValueError(\"Missing the required",
"Specify a field used to order the result set. :param str order: Ihe",
"profiles. By default 10 values are returned. Records are returned in natural order",
"\"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_all_profiles_with_http_info(**kwargs) else: (data) = self.get_all_profiles_with_http_info(**kwargs) return",
"else: (data) = self.get_profile_by_account_id_with_http_info(account_id, **kwargs) return data def get_profile_by_account_id_with_http_info(self, account_id, **kwargs): \"\"\" Returns",
"del params['kwargs'] # verify the required parameter 'account_id' is set if ('account_id' not",
"returns the request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_profile_by_email_address_with_http_info(email, **kwargs)",
"request thread. \"\"\" kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.update_profile_with_http_info(request, **kwargs) else: (data)",
"You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by",
"= {} header_params = {} form_params = [] local_var_files = {} body_params =",
"auto generated by the swagger code generator program. Do not edit the class",
"re # python 2 and python 3 compatibility library from six import iteritems",
"config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client",
"collection of profiles, specified by the account-ID parameter. By default 10 values are",
"applicable law or agreed to in writing, software distributed under the License is",
"Ihe direction of any ordering, either ASC or DESC. :param bool include_retired: Whether",
"= api.get_profile_by_email_address(email, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional)",
"is called asynchronously, returns the request thread. \"\"\" all_params = ['account_id', 'organizations', 'offset',",
"to restrict the scope of API calls. :return: ProfilePagedMetadata If the method is",
"to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>>",
"def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_profile_by_email_address_with_http_info(email, callback=callback_function) :param callback function:",
"_return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_account_id(self, account_id, **kwargs): \"\"\" Returns a collection of profiles, specified by",
"import Configuration from ..api_client import ApiClient class ProfilesApi(object): \"\"\" NOTE: This class is",
"= {} query_params = {} header_params = {} form_params = [] local_var_files =",
">>> pprint(response) >>> >>> thread = api.get_all_profiles(callback=callback_function) :param callback function: The callback function",
"_return_http_data_only=params.get('_return_http_data_only')) def get_profile_by_email_address(self, email, **kwargs): \"\"\" Returns a single profile, specified by the",
"if 'profile_id' in params: path_params['profile-ID'] = params['profile_id'] query_params = {} if 'organizations' in",
"**kwargs): \"\"\" Update a profile {\\\"nickname\\\":\\\"Update a profile\\\",\\\"request\\\":\\\"updateProfileRequest.html\\\",\\\"response\\\":\\\"updateProfileResponse.html\\\"} This method makes a synchronous",
"parameter. {\\\"nickname\\\":\\\"Retrieve by e-mail\\\",\\\"response\\\":\\\"getProfileByEmail.html\\\"} This method makes a synchronous HTTP request by default.",
"scope of API calls. :return: ProfilePagedMetadata If the method is called asynchronously, returns",
"If the method is called asynchronously, returns the request thread. \"\"\" all_params =",
"resource_path = '/profiles'.replace('{format}', 'json') path_params = {} query_params = {} if 'organizations' in",
"any ordering, either ASC or DESC. :return: ProfilePagedMetadata If the method is called",
"are returned. Records are returned in natural order {\\\"nickname\\\":\\\"Get all profiles\\\",\\\"response\\\":\\\"getProfileAll.html\\\"} This method",
"= api.get_profile_by_account_id_with_http_info(account_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional)",
"to method get_profile_by_email_address\" % key ) params[key] = val del params['kwargs'] # verify",
"with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0",
"TypeError( \"Got an unexpected keyword argument '%s'\" \" to method get_profile\" % key",
") params[key] = val del params['kwargs'] resource_path = '/profiles'.replace('{format}', 'json') path_params = {}",
"3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client",
"thread = api.update_profile(request, callback=callback_function) :param callback function: The callback function for asynchronous request."
] |