code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
"""Time client"""
import urllib.request
import json
from datetime import datetime
# make sure that module51-server.py service is running
TIME_URL = "http://localhost:5000/"
def ex51():
with urllib.request.urlopen(TIME_URL) as response:
body = response.read()
parsed = json.loads(body)
date = datetime.fromisoformat(parsed["currentTime"])
stamp = date.strftime("%H:%M:%S %Z %B %m %d")
print("The current time is %s" % stamp)
if __name__ == "__main__":
ex51()
|
normal
|
{
"blob_id": "e8f05a66c642ef3b570130a2996ca27efb8b0cb5",
"index": 5287,
"step-1": "<mask token>\n\n\ndef ex51():\n with urllib.request.urlopen(TIME_URL) as response:\n body = response.read()\n parsed = json.loads(body)\n date = datetime.fromisoformat(parsed['currentTime'])\n stamp = date.strftime('%H:%M:%S %Z %B %m %d')\n print('The current time is %s' % stamp)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef ex51():\n with urllib.request.urlopen(TIME_URL) as response:\n body = response.read()\n parsed = json.loads(body)\n date = datetime.fromisoformat(parsed['currentTime'])\n stamp = date.strftime('%H:%M:%S %Z %B %m %d')\n print('The current time is %s' % stamp)\n\n\nif __name__ == '__main__':\n ex51()\n",
"step-3": "<mask token>\nTIME_URL = 'http://localhost:5000/'\n\n\ndef ex51():\n with urllib.request.urlopen(TIME_URL) as response:\n body = response.read()\n parsed = json.loads(body)\n date = datetime.fromisoformat(parsed['currentTime'])\n stamp = date.strftime('%H:%M:%S %Z %B %m %d')\n print('The current time is %s' % stamp)\n\n\nif __name__ == '__main__':\n ex51()\n",
"step-4": "<mask token>\nimport urllib.request\nimport json\nfrom datetime import datetime\nTIME_URL = 'http://localhost:5000/'\n\n\ndef ex51():\n with urllib.request.urlopen(TIME_URL) as response:\n body = response.read()\n parsed = json.loads(body)\n date = datetime.fromisoformat(parsed['currentTime'])\n stamp = date.strftime('%H:%M:%S %Z %B %m %d')\n print('The current time is %s' % stamp)\n\n\nif __name__ == '__main__':\n ex51()\n",
"step-5": "\"\"\"Time client\"\"\"\n\nimport urllib.request\nimport json\nfrom datetime import datetime\n\n# make sure that module51-server.py service is running\nTIME_URL = \"http://localhost:5000/\"\n\ndef ex51():\n with urllib.request.urlopen(TIME_URL) as response:\n body = response.read()\n parsed = json.loads(body)\n date = datetime.fromisoformat(parsed[\"currentTime\"])\n stamp = date.strftime(\"%H:%M:%S %Z %B %m %d\")\n print(\"The current time is %s\" % stamp)\n\nif __name__ == \"__main__\":\n ex51()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Su sueldo mensual sera')
print(sueldo)
<|reserved_special_token_1|>
c_horas = int(input('Ingrese la cantidad de horas trabajadas:'))
v_horas = int(input('Ingrese el valor de cada hora trabajada:'))
sueldo = c_horas * v_horas
print('Su sueldo mensual sera')
print(sueldo)
<|reserved_special_token_1|>
c_horas=int(input("Ingrese la cantidad de horas trabajadas:"))
v_horas=int(input("Ingrese el valor de cada hora trabajada:"))
sueldo=c_horas*v_horas
print("Su sueldo mensual sera")
print(sueldo)
|
flexible
|
{
"blob_id": "2e4b47b8c3ac4f187b32f1013a34c3bea354b519",
"index": 6817,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Su sueldo mensual sera')\nprint(sueldo)\n",
"step-3": "c_horas = int(input('Ingrese la cantidad de horas trabajadas:'))\nv_horas = int(input('Ingrese el valor de cada hora trabajada:'))\nsueldo = c_horas * v_horas\nprint('Su sueldo mensual sera')\nprint(sueldo)\n",
"step-4": "c_horas=int(input(\"Ingrese la cantidad de horas trabajadas:\"))\r\nv_horas=int(input(\"Ingrese el valor de cada hora trabajada:\"))\r\nsueldo=c_horas*v_horas\r\nprint(\"Su sueldo mensual sera\")\r\nprint(sueldo)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import platform, sys, os, ensurepip
ensurepip.bootstrap()
try:
import pip
except ImportError:
print("Error: Failed to install pip, make sure you are running this script as admin.")
sys.exit()
arch = platform.architecture()[0]
wheelUrl = "https://raw.githubusercontent.com/Starfox64/pygame-installer/master/wheels/"
print("You are using Python" + str(sys.version_info[0]) + str(sys.version_info[1]) + " " + arch + ".")
if sys.version_info[0] == 2 and sys.version_info[1] == 7:
if arch == "64bit":
wheelUrl += "pygame-1.9.2b1-cp27-cp27m-win_amd64.whl"
else:
wheelUrl += "pygame-1.9.2b1-cp27-cp27m-win32.whl"
elif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):
if sys.version_info[1] == 4:
if arch == "64bit":
wheelUrl += "pygame-1.9.2b1-cp34-cp34m-win_amd64.whl"
else:
wheelUrl += "pygame-1.9.2b1-cp34-cp34m-win32.whl"
elif sys.version_info[1] == 5:
if arch == "64bit":
wheelUrl += "pygame-1.9.2b1-cp35-cp35m-win_amd64.whl"
else:
wheelUrl += "pygame-1.9.2b1-cp35-cp35m-win32.whl"
elif sys.version_info[1] == 6:
if arch == "64bit":
wheelUrl += "pygame-1.9.2b8-cp36-cp36m-win_amd64.whl"
else:
wheelUrl += "pygame-1.9.2b8-cp36-cp36m-win32.whl"
else:
print("Pygame only supports Python 27, 34, 35 and 36.")
sys.exit()
if pip.main(["install", wheelUrl]) == 0:
print("Pygame should now be installed.")
else:
print("Something went wrong during the installation of pygame.")
os.system("pause")
|
normal
|
{
"blob_id": "b44f75db652b3a40cd9475bfe44027724e845252",
"index": 1146,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nensurepip.bootstrap()\ntry:\n import pip\nexcept ImportError:\n print(\n 'Error: Failed to install pip, make sure you are running this script as admin.'\n )\n sys.exit()\n<mask token>\nprint('You are using Python' + str(sys.version_info[0]) + str(sys.\n version_info[1]) + ' ' + arch + '.')\nif sys.version_info[0] == 2 and sys.version_info[1] == 7:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win32.whl'\nelif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):\n if sys.version_info[1] == 4:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win32.whl'\n elif sys.version_info[1] == 5:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win32.whl'\n elif sys.version_info[1] == 6:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win32.whl'\nelse:\n print('Pygame only supports Python 27, 34, 35 and 36.')\n sys.exit()\nif pip.main(['install', wheelUrl]) == 0:\n print('Pygame should now be installed.')\nelse:\n print('Something went wrong during the installation of pygame.')\nos.system('pause')\n",
"step-3": "<mask token>\nensurepip.bootstrap()\ntry:\n import pip\nexcept ImportError:\n print(\n 'Error: Failed to install pip, make sure you are running this script as admin.'\n )\n sys.exit()\narch = platform.architecture()[0]\nwheelUrl = (\n 'https://raw.githubusercontent.com/Starfox64/pygame-installer/master/wheels/'\n )\nprint('You are using Python' + str(sys.version_info[0]) + str(sys.\n version_info[1]) + ' ' + arch + '.')\nif sys.version_info[0] == 2 and sys.version_info[1] == 7:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win32.whl'\nelif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):\n if sys.version_info[1] == 4:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win32.whl'\n elif sys.version_info[1] == 5:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win32.whl'\n elif sys.version_info[1] == 6:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win32.whl'\nelse:\n print('Pygame only supports Python 27, 34, 35 and 36.')\n sys.exit()\nif pip.main(['install', wheelUrl]) == 0:\n print('Pygame should now be installed.')\nelse:\n print('Something went wrong during the installation of pygame.')\nos.system('pause')\n",
"step-4": "import platform, sys, os, ensurepip\nensurepip.bootstrap()\ntry:\n import pip\nexcept ImportError:\n print(\n 'Error: Failed to install pip, make sure you are running this script as admin.'\n )\n sys.exit()\narch = platform.architecture()[0]\nwheelUrl = (\n 'https://raw.githubusercontent.com/Starfox64/pygame-installer/master/wheels/'\n )\nprint('You are using Python' + str(sys.version_info[0]) + str(sys.\n version_info[1]) + ' ' + arch + '.')\nif sys.version_info[0] == 2 and sys.version_info[1] == 7:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win32.whl'\nelif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):\n if sys.version_info[1] == 4:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win32.whl'\n elif sys.version_info[1] == 5:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win32.whl'\n elif sys.version_info[1] == 6:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win32.whl'\nelse:\n print('Pygame only supports Python 27, 34, 35 and 36.')\n sys.exit()\nif pip.main(['install', wheelUrl]) == 0:\n print('Pygame should now be installed.')\nelse:\n print('Something went wrong during the installation of pygame.')\nos.system('pause')\n",
"step-5": "import platform, sys, os, ensurepip\r\n\nensurepip.bootstrap()\n\ntry:\n\timport pip\nexcept ImportError:\n\tprint(\"Error: Failed to install pip, make sure you are running this script as admin.\")\n\tsys.exit()\n\narch = platform.architecture()[0]\r\nwheelUrl = \"https://raw.githubusercontent.com/Starfox64/pygame-installer/master/wheels/\"\r\n\r\nprint(\"You are using Python\" + str(sys.version_info[0]) + str(sys.version_info[1]) + \" \" + arch + \".\")\r\n\r\nif sys.version_info[0] == 2 and sys.version_info[1] == 7:\r\n\tif arch == \"64bit\":\r\n\t\twheelUrl += \"pygame-1.9.2b1-cp27-cp27m-win_amd64.whl\"\r\n\telse:\r\n\t\twheelUrl += \"pygame-1.9.2b1-cp27-cp27m-win32.whl\"\r\nelif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):\r\n\tif sys.version_info[1] == 4:\r\n\t\tif arch == \"64bit\":\r\n\t\t\twheelUrl += \"pygame-1.9.2b1-cp34-cp34m-win_amd64.whl\"\r\n\t\telse:\r\n\t\t\twheelUrl += \"pygame-1.9.2b1-cp34-cp34m-win32.whl\"\r\n\telif sys.version_info[1] == 5:\r\n\t\tif arch == \"64bit\":\r\n\t\t\twheelUrl += \"pygame-1.9.2b1-cp35-cp35m-win_amd64.whl\"\r\n\t\telse:\r\n\t\t\twheelUrl += \"pygame-1.9.2b1-cp35-cp35m-win32.whl\"\r\n\telif sys.version_info[1] == 6:\r\n\t\tif arch == \"64bit\":\r\n\t\t\twheelUrl += \"pygame-1.9.2b8-cp36-cp36m-win_amd64.whl\"\r\n\t\telse:\r\n\t\t\twheelUrl += \"pygame-1.9.2b8-cp36-cp36m-win32.whl\"\r\nelse:\r\n\tprint(\"Pygame only supports Python 27, 34, 35 and 36.\")\r\n\tsys.exit()\r\n\r\nif pip.main([\"install\", wheelUrl]) == 0:\r\n\tprint(\"Pygame should now be installed.\")\r\nelse:\r\n\tprint(\"Something went wrong during the installation of pygame.\")\r\n\r\nos.system(\"pause\")\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def draw_point(x, y):
plt.scatter(x, y)
plt.title('点分布图')
plt.xlabel('x轴')
plt.ylabel('y轴')
plt.grid(True)
plt.show()
def draw_route(route_list, x, y):
plt.scatter(x, y)
for route in route_list:
route = np.array(route)
plt.plot(route[:, 0], route[:, 1])
plt.title('路径图')
plt.xlabel('x轴')
plt.ylabel('y轴')
plt.grid(True)
plt.show()
def read_data(path, node):
csv_data = pd.read_csv(path)
x = csv_data['Easting']
y = csv_data['Southing']
for i in range(len(x)):
xy = []
xy.append(x[i])
xy.append(y[i])
node.append(xy)
node_sort = sorted(node, key=lambda x: (x[0], x[1]))
return node_sort, x, y
def init_routing(route_number, route_list, leading_edge, node_sort):
for n in node_sort:
if n == node_sort[0]:
continue
route = []
route.append(node_sort[0])
route.append(n)
route_list.append(route)
leading_edge.append(n)
if len(route_list) >= route_number:
return route_list
return
def expand(route_list, leading_edge, node_sort, route_number):
for i in range(len(node_sort)):
if i <= route_number:
continue
y_min = 0
max_index = 0
for a in range(len(leading_edge)):
if leading_edge[a][1] > y_min:
y_min = leading_edge[a][1]
max_index = a
index = -1
for n in range(len(leading_edge)):
delta_y = leading_edge[n][1] - node_sort[i][1]
if (delta_y >= 0) & (delta_y < y_min):
y_min = delta_y
index = n
if index < 0:
index = max_index
route_list[index].append(node_sort[i])
leading_edge[index] = node_sort[i]
return route_list
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def draw_point(x, y):
plt.scatter(x, y)
plt.title('点分布图')
plt.xlabel('x轴')
plt.ylabel('y轴')
plt.grid(True)
plt.show()
def draw_route(route_list, x, y):
plt.scatter(x, y)
for route in route_list:
route = np.array(route)
plt.plot(route[:, 0], route[:, 1])
plt.title('路径图')
plt.xlabel('x轴')
plt.ylabel('y轴')
plt.grid(True)
plt.show()
def read_data(path, node):
csv_data = pd.read_csv(path)
x = csv_data['Easting']
y = csv_data['Southing']
for i in range(len(x)):
xy = []
xy.append(x[i])
xy.append(y[i])
node.append(xy)
node_sort = sorted(node, key=lambda x: (x[0], x[1]))
return node_sort, x, y
def init_routing(route_number, route_list, leading_edge, node_sort):
for n in node_sort:
if n == node_sort[0]:
continue
route = []
route.append(node_sort[0])
route.append(n)
route_list.append(route)
leading_edge.append(n)
if len(route_list) >= route_number:
return route_list
return
def expand(route_list, leading_edge, node_sort, route_number):
for i in range(len(node_sort)):
if i <= route_number:
continue
y_min = 0
max_index = 0
for a in range(len(leading_edge)):
if leading_edge[a][1] > y_min:
y_min = leading_edge[a][1]
max_index = a
index = -1
for n in range(len(leading_edge)):
delta_y = leading_edge[n][1] - node_sort[i][1]
if (delta_y >= 0) & (delta_y < y_min):
y_min = delta_y
index = n
if index < 0:
index = max_index
route_list[index].append(node_sort[i])
leading_edge[index] = node_sort[i]
return route_list
if __name__ == '__main__':
path = 'coordinates v1.csv'
node = []
route_list = []
leading_edge = []
route_number = 6
node_sort, x, y = read_data(path, node)
route_list = init_routing(route_number, route_list, leading_edge, node_sort
)
route_list = expand(route_list, leading_edge, node_sort, route_number)
route_list = np.array(route_list)
draw_route(route_list, x, y)
print(route_list)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.rcParams['font.sans-serif'] = ['FangSong']
plt.rcParams['axes.unicode_minus'] = False
def draw_point(x, y):
plt.scatter(x, y)
plt.title('点分布图')
plt.xlabel('x轴')
plt.ylabel('y轴')
plt.grid(True)
plt.show()
def draw_route(route_list, x, y):
plt.scatter(x, y)
for route in route_list:
route = np.array(route)
plt.plot(route[:, 0], route[:, 1])
plt.title('路径图')
plt.xlabel('x轴')
plt.ylabel('y轴')
plt.grid(True)
plt.show()
def read_data(path, node):
csv_data = pd.read_csv(path)
x = csv_data['Easting']
y = csv_data['Southing']
for i in range(len(x)):
xy = []
xy.append(x[i])
xy.append(y[i])
node.append(xy)
node_sort = sorted(node, key=lambda x: (x[0], x[1]))
return node_sort, x, y
def init_routing(route_number, route_list, leading_edge, node_sort):
for n in node_sort:
if n == node_sort[0]:
continue
route = []
route.append(node_sort[0])
route.append(n)
route_list.append(route)
leading_edge.append(n)
if len(route_list) >= route_number:
return route_list
return
def expand(route_list, leading_edge, node_sort, route_number):
for i in range(len(node_sort)):
if i <= route_number:
continue
y_min = 0
max_index = 0
for a in range(len(leading_edge)):
if leading_edge[a][1] > y_min:
y_min = leading_edge[a][1]
max_index = a
index = -1
for n in range(len(leading_edge)):
delta_y = leading_edge[n][1] - node_sort[i][1]
if (delta_y >= 0) & (delta_y < y_min):
y_min = delta_y
index = n
if index < 0:
index = max_index
route_list[index].append(node_sort[i])
leading_edge[index] = node_sort[i]
return route_list
if __name__ == '__main__':
path = 'coordinates v1.csv'
node = []
route_list = []
leading_edge = []
route_number = 6
node_sort, x, y = read_data(path, node)
route_list = init_routing(route_number, route_list, leading_edge, node_sort
)
route_list = expand(route_list, leading_edge, node_sort, route_number)
route_list = np.array(route_list)
draw_route(route_list, x, y)
print(route_list)
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['font.sans-serif'] = ['FangSong']
plt.rcParams['axes.unicode_minus'] = False
def draw_point(x, y):
plt.scatter(x, y)
plt.title('点分布图')
plt.xlabel('x轴')
plt.ylabel('y轴')
plt.grid(True)
plt.show()
def draw_route(route_list, x, y):
plt.scatter(x, y)
for route in route_list:
route = np.array(route)
plt.plot(route[:, 0], route[:, 1])
plt.title('路径图')
plt.xlabel('x轴')
plt.ylabel('y轴')
plt.grid(True)
plt.show()
def read_data(path, node):
csv_data = pd.read_csv(path)
x = csv_data['Easting']
y = csv_data['Southing']
for i in range(len(x)):
xy = []
xy.append(x[i])
xy.append(y[i])
node.append(xy)
node_sort = sorted(node, key=lambda x: (x[0], x[1]))
return node_sort, x, y
def init_routing(route_number, route_list, leading_edge, node_sort):
for n in node_sort:
if n == node_sort[0]:
continue
route = []
route.append(node_sort[0])
route.append(n)
route_list.append(route)
leading_edge.append(n)
if len(route_list) >= route_number:
return route_list
return
def expand(route_list, leading_edge, node_sort, route_number):
for i in range(len(node_sort)):
if i <= route_number:
continue
y_min = 0
max_index = 0
for a in range(len(leading_edge)):
if leading_edge[a][1] > y_min:
y_min = leading_edge[a][1]
max_index = a
index = -1
for n in range(len(leading_edge)):
delta_y = leading_edge[n][1] - node_sort[i][1]
if (delta_y >= 0) & (delta_y < y_min):
y_min = delta_y
index = n
if index < 0:
index = max_index
route_list[index].append(node_sort[i])
leading_edge[index] = node_sort[i]
return route_list
if __name__ == '__main__':
path = 'coordinates v1.csv'
node = []
route_list = []
leading_edge = []
route_number = 6
node_sort, x, y = read_data(path, node)
route_list = init_routing(route_number, route_list, leading_edge, node_sort
)
route_list = expand(route_list, leading_edge, node_sort, route_number)
route_list = np.array(route_list)
draw_route(route_list, x, y)
print(route_list)
<|reserved_special_token_1|>
#Write by Jess.S 25/1/2019
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['font.sans-serif'] = ['FangSong'] # 指定默认字体
plt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题
def draw_point(x,y):
plt.scatter(x, y)
plt.title('点分布图')#显示图表标题
plt.xlabel('x轴')#x轴名称
plt.ylabel('y轴')#y轴名称
plt.grid(True)#显示网格线
plt.show()
def draw_route(route_list,x,y):
plt.scatter(x, y)
for route in route_list:
route= np.array(route)
# print(route.shape)
plt.plot(route[:,0],route[:,1])
plt.title('路径图')#显示图表标题
plt.xlabel('x轴')#x轴名称
plt.ylabel('y轴')#y轴名称
plt.grid(True)#显示网格线
plt.show()
def read_data(path,node):
csv_data = pd.read_csv(path) # 读取训练数据
# print(csv_data)
x = csv_data['Easting']
y = csv_data['Southing']
# print(x)
# print(y)
for i in range(len(x)):
xy = []
xy.append(x[i])
xy.append(y[i])
node.append(xy)
# print(node)
node_sort =sorted(node, key=lambda x: (x[0], x[1]))
# print(node_sort)
#另一种利用numpy的排序方法
# node = np.array(node)
# node = node[np.lexsort(node[:,::-1].T)]
# print(node)
return node_sort,x,y
#判断前沿面的点是否被更新
# def dominant(prev,current):
# if prev[0]<current[0] & prev[1]<current[1]:
# return True
# return False
#
# #判断两条路径是否有重叠部分
# def judge_line(origin,n1,n2):
# if((n1[1]-origin[1])/(n1[0]-origin[0])==(n2[1]-origin[1])/(n2[0]-origin[0])):
# return True
# return False
def init_routing(route_number,route_list,leading_edge,node_sort):
for n in node_sort:
if(n == node_sort[0]):
continue
route = []
route.append(node_sort[0])
route.append(n)
route_list.append(route)
leading_edge.append(n)
if(len(route_list)>=route_number):
return route_list
return
def expand(route_list, leading_edge, node_sort, route_number):
    """Assign each remaining sorted node to the best-matching route.

    A node joins the route whose leading-edge point lies at or above it
    with the smallest vertical gap; if no edge qualifies, the route whose
    edge has the largest y is used as the fallback.  *route_list* and
    *leading_edge* are mutated in place; returns *route_list*.
    """
    for pos, point in enumerate(node_sort):
        # The first route_number + 1 nodes were consumed during seeding.
        if pos <= route_number:
            continue
        # Find the highest route tip: it doubles as the gap threshold and
        # as the fallback target when no edge lies above this node.
        threshold = 0
        fallback = 0
        for r, tip in enumerate(leading_edge):
            if tip[1] > threshold:
                threshold = tip[1]
                fallback = r
        chosen = -1
        for r, tip in enumerate(leading_edge):
            gap = tip[1] - point[1]
            if 0 <= gap < threshold:
                threshold = gap
                chosen = r
        if chosen < 0:
            chosen = fallback
        route_list[chosen].append(point)
        leading_edge[chosen] = point
    return route_list
if __name__=='__main__':
    # Entry point: load the CSV, seed routes, grow them, then plot/print.
    path = 'coordinates v1.csv'
    node = []  # coordinates of every point; sorted inside read_data
    route_list = []  # routes built so far (each a list of [x, y] points)
    leading_edge = []  # current tip point of each route, indexed like route_list
    route_number = 6  # how many routes fan out from the starting node
    node_sort,x,y = read_data(path, node)
    route_list = init_routing(route_number, route_list, leading_edge,node_sort)
    route_list = expand(route_list, leading_edge, node_sort, route_number)
    # NOTE(review): routes can have unequal lengths, so np.array may build a
    # ragged/object array here — confirm downstream indexing still works.
    route_list = np.array(route_list)
    draw_route(route_list,x,y)
    print(route_list)
|
flexible
|
{
"blob_id": "1c60620814a4aea2573caf99cee87590a8d57c18",
"index": 5483,
"step-1": "<mask token>\n\n\ndef draw_point(x, y):\n plt.scatter(x, y)\n plt.title('点分布图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef draw_route(route_list, x, y):\n plt.scatter(x, y)\n for route in route_list:\n route = np.array(route)\n plt.plot(route[:, 0], route[:, 1])\n plt.title('路径图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef read_data(path, node):\n csv_data = pd.read_csv(path)\n x = csv_data['Easting']\n y = csv_data['Southing']\n for i in range(len(x)):\n xy = []\n xy.append(x[i])\n xy.append(y[i])\n node.append(xy)\n node_sort = sorted(node, key=lambda x: (x[0], x[1]))\n return node_sort, x, y\n\n\ndef init_routing(route_number, route_list, leading_edge, node_sort):\n for n in node_sort:\n if n == node_sort[0]:\n continue\n route = []\n route.append(node_sort[0])\n route.append(n)\n route_list.append(route)\n leading_edge.append(n)\n if len(route_list) >= route_number:\n return route_list\n return\n\n\ndef expand(route_list, leading_edge, node_sort, route_number):\n for i in range(len(node_sort)):\n if i <= route_number:\n continue\n y_min = 0\n max_index = 0\n for a in range(len(leading_edge)):\n if leading_edge[a][1] > y_min:\n y_min = leading_edge[a][1]\n max_index = a\n index = -1\n for n in range(len(leading_edge)):\n delta_y = leading_edge[n][1] - node_sort[i][1]\n if (delta_y >= 0) & (delta_y < y_min):\n y_min = delta_y\n index = n\n if index < 0:\n index = max_index\n route_list[index].append(node_sort[i])\n leading_edge[index] = node_sort[i]\n return route_list\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef draw_point(x, y):\n plt.scatter(x, y)\n plt.title('点分布图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef draw_route(route_list, x, y):\n plt.scatter(x, y)\n for route in route_list:\n route = np.array(route)\n plt.plot(route[:, 0], route[:, 1])\n plt.title('路径图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef read_data(path, node):\n csv_data = pd.read_csv(path)\n x = csv_data['Easting']\n y = csv_data['Southing']\n for i in range(len(x)):\n xy = []\n xy.append(x[i])\n xy.append(y[i])\n node.append(xy)\n node_sort = sorted(node, key=lambda x: (x[0], x[1]))\n return node_sort, x, y\n\n\ndef init_routing(route_number, route_list, leading_edge, node_sort):\n for n in node_sort:\n if n == node_sort[0]:\n continue\n route = []\n route.append(node_sort[0])\n route.append(n)\n route_list.append(route)\n leading_edge.append(n)\n if len(route_list) >= route_number:\n return route_list\n return\n\n\ndef expand(route_list, leading_edge, node_sort, route_number):\n for i in range(len(node_sort)):\n if i <= route_number:\n continue\n y_min = 0\n max_index = 0\n for a in range(len(leading_edge)):\n if leading_edge[a][1] > y_min:\n y_min = leading_edge[a][1]\n max_index = a\n index = -1\n for n in range(len(leading_edge)):\n delta_y = leading_edge[n][1] - node_sort[i][1]\n if (delta_y >= 0) & (delta_y < y_min):\n y_min = delta_y\n index = n\n if index < 0:\n index = max_index\n route_list[index].append(node_sort[i])\n leading_edge[index] = node_sort[i]\n return route_list\n\n\nif __name__ == '__main__':\n path = 'coordinates v1.csv'\n node = []\n route_list = []\n leading_edge = []\n route_number = 6\n node_sort, x, y = read_data(path, node)\n route_list = init_routing(route_number, route_list, leading_edge, node_sort\n )\n route_list = expand(route_list, leading_edge, node_sort, route_number)\n route_list = np.array(route_list)\n draw_route(route_list, x, y)\n print(route_list)\n",
"step-3": "<mask token>\nplt.rcParams['font.sans-serif'] = ['FangSong']\nplt.rcParams['axes.unicode_minus'] = False\n\n\ndef draw_point(x, y):\n plt.scatter(x, y)\n plt.title('点分布图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef draw_route(route_list, x, y):\n plt.scatter(x, y)\n for route in route_list:\n route = np.array(route)\n plt.plot(route[:, 0], route[:, 1])\n plt.title('路径图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef read_data(path, node):\n csv_data = pd.read_csv(path)\n x = csv_data['Easting']\n y = csv_data['Southing']\n for i in range(len(x)):\n xy = []\n xy.append(x[i])\n xy.append(y[i])\n node.append(xy)\n node_sort = sorted(node, key=lambda x: (x[0], x[1]))\n return node_sort, x, y\n\n\ndef init_routing(route_number, route_list, leading_edge, node_sort):\n for n in node_sort:\n if n == node_sort[0]:\n continue\n route = []\n route.append(node_sort[0])\n route.append(n)\n route_list.append(route)\n leading_edge.append(n)\n if len(route_list) >= route_number:\n return route_list\n return\n\n\ndef expand(route_list, leading_edge, node_sort, route_number):\n for i in range(len(node_sort)):\n if i <= route_number:\n continue\n y_min = 0\n max_index = 0\n for a in range(len(leading_edge)):\n if leading_edge[a][1] > y_min:\n y_min = leading_edge[a][1]\n max_index = a\n index = -1\n for n in range(len(leading_edge)):\n delta_y = leading_edge[n][1] - node_sort[i][1]\n if (delta_y >= 0) & (delta_y < y_min):\n y_min = delta_y\n index = n\n if index < 0:\n index = max_index\n route_list[index].append(node_sort[i])\n leading_edge[index] = node_sort[i]\n return route_list\n\n\nif __name__ == '__main__':\n path = 'coordinates v1.csv'\n node = []\n route_list = []\n leading_edge = []\n route_number = 6\n node_sort, x, y = read_data(path, node)\n route_list = init_routing(route_number, route_list, leading_edge, node_sort\n )\n route_list = expand(route_list, leading_edge, node_sort, route_number)\n 
route_list = np.array(route_list)\n draw_route(route_list, x, y)\n print(route_list)\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.rcParams['font.sans-serif'] = ['FangSong']\nplt.rcParams['axes.unicode_minus'] = False\n\n\ndef draw_point(x, y):\n plt.scatter(x, y)\n plt.title('点分布图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef draw_route(route_list, x, y):\n plt.scatter(x, y)\n for route in route_list:\n route = np.array(route)\n plt.plot(route[:, 0], route[:, 1])\n plt.title('路径图')\n plt.xlabel('x轴')\n plt.ylabel('y轴')\n plt.grid(True)\n plt.show()\n\n\ndef read_data(path, node):\n csv_data = pd.read_csv(path)\n x = csv_data['Easting']\n y = csv_data['Southing']\n for i in range(len(x)):\n xy = []\n xy.append(x[i])\n xy.append(y[i])\n node.append(xy)\n node_sort = sorted(node, key=lambda x: (x[0], x[1]))\n return node_sort, x, y\n\n\ndef init_routing(route_number, route_list, leading_edge, node_sort):\n for n in node_sort:\n if n == node_sort[0]:\n continue\n route = []\n route.append(node_sort[0])\n route.append(n)\n route_list.append(route)\n leading_edge.append(n)\n if len(route_list) >= route_number:\n return route_list\n return\n\n\ndef expand(route_list, leading_edge, node_sort, route_number):\n for i in range(len(node_sort)):\n if i <= route_number:\n continue\n y_min = 0\n max_index = 0\n for a in range(len(leading_edge)):\n if leading_edge[a][1] > y_min:\n y_min = leading_edge[a][1]\n max_index = a\n index = -1\n for n in range(len(leading_edge)):\n delta_y = leading_edge[n][1] - node_sort[i][1]\n if (delta_y >= 0) & (delta_y < y_min):\n y_min = delta_y\n index = n\n if index < 0:\n index = max_index\n route_list[index].append(node_sort[i])\n leading_edge[index] = node_sort[i]\n return route_list\n\n\nif __name__ == '__main__':\n path = 'coordinates v1.csv'\n node = []\n route_list = []\n leading_edge = []\n route_number = 6\n node_sort, x, y = read_data(path, node)\n route_list = init_routing(route_number, route_list, leading_edge, node_sort\n )\n route_list = 
expand(route_list, leading_edge, node_sort, route_number)\n route_list = np.array(route_list)\n draw_route(route_list, x, y)\n print(route_list)\n",
"step-5": "#Write by Jess.S 25/1/2019\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nplt.rcParams['font.sans-serif'] = ['FangSong'] # 指定默认字体\r\nplt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题\r\n\r\ndef draw_point(x,y):\r\n plt.scatter(x, y)\r\n plt.title('点分布图')#显示图表标题\r\n plt.xlabel('x轴')#x轴名称\r\n plt.ylabel('y轴')#y轴名称\r\n plt.grid(True)#显示网格线\r\n plt.show()\r\n\r\ndef draw_route(route_list,x,y):\r\n plt.scatter(x, y)\r\n for route in route_list:\r\n route= np.array(route)\r\n# print(route.shape)\r\n plt.plot(route[:,0],route[:,1])\r\n plt.title('路径图')#显示图表标题\r\n plt.xlabel('x轴')#x轴名称\r\n plt.ylabel('y轴')#y轴名称\r\n plt.grid(True)#显示网格线\r\n plt.show()\r\n \r\ndef read_data(path,node):\r\n csv_data = pd.read_csv(path) # 读取训练数据\r\n # print(csv_data)\r\n x = csv_data['Easting']\r\n y = csv_data['Southing']\r\n\r\n # print(x)\r\n # print(y)\r\n for i in range(len(x)):\r\n xy = []\r\n xy.append(x[i])\r\n xy.append(y[i])\r\n node.append(xy)\r\n # print(node)\r\n node_sort =sorted(node, key=lambda x: (x[0], x[1]))\r\n # print(node_sort)\r\n #另一种利用numpy的排序方法\r\n \r\n # node = np.array(node)\r\n # node = node[np.lexsort(node[:,::-1].T)]\r\n # print(node)\r\n return node_sort,x,y\r\n#判断前沿面的点是否被更新\r\n# def dominant(prev,current):\r\n# if prev[0]<current[0] & prev[1]<current[1]:\r\n# return True\r\n# return False\r\n# \r\n# #判断两条路径是否有重叠部分\r\n# def judge_line(origin,n1,n2):\r\n# if((n1[1]-origin[1])/(n1[0]-origin[0])==(n2[1]-origin[1])/(n2[0]-origin[0])):\r\n# return True\r\n# return False\r\n\r\ndef init_routing(route_number,route_list,leading_edge,node_sort): \r\n for n in node_sort:\r\n if(n == node_sort[0]):\r\n continue\r\n route = []\r\n route.append(node_sort[0])\r\n route.append(n)\r\n route_list.append(route)\r\n leading_edge.append(n)\r\n if(len(route_list)>=route_number):\r\n return route_list\r\n return\r\n \r\ndef expand(route_list,leading_edge,node_sort,route_number):\r\n for i in range(len(node_sort)):\r\n 
if(i<=route_number):\r\n continue\r\n y_min = 0\r\n max_index = 0\r\n for a in range(len(leading_edge)):\r\n if(leading_edge[a][1]>y_min):\r\n y_min = leading_edge[a][1]\r\n max_index = a\r\n index = -1\r\n for n in range(len(leading_edge)):\r\n delta_y = leading_edge[n][1] - node_sort[i][1]\r\n if((delta_y>=0) & (delta_y<y_min)):\r\n y_min = delta_y\r\n index = n\r\n if(index < 0):\r\n index = max_index \r\n route_list[index].append(node_sort[i])\r\n leading_edge[index] = node_sort[i]\r\n return route_list \r\n\r\nif __name__=='__main__':\r\n path = 'coordinates v1.csv'\r\n node = []#所有点的坐标信息,下面进行排序\r\n route_list = []#存储现有的路径信息\r\n leading_edge = []#存储路径最前沿延续的路径index\r\n route_number = 6\r\n node_sort,x,y = read_data(path, node)\r\n route_list = init_routing(route_number, route_list, leading_edge,node_sort)\r\n route_list = expand(route_list, leading_edge, node_sort, route_number)\r\n route_list = np.array(route_list)\r\n draw_route(route_list,x,y)\r\n print(route_list)\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Login to weibo.com')
<|reserved_special_token_0|>
req.add_header('Host', 'chenshuaijun.com')
req.add_header('User-Agent',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'
)
with request.urlopen(req, data=login_data.encode('utf-8')) as f:
print('Status:', f.status, f.reason)
for k, v in f.getheaders():
print('%s: %s' % (k, v))
print('Data:', f.read().decode('utf-8'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Login to weibo.com')
email = input('Email:')
passwd = input('Password:')
login_data = parse.urlencode([('username', email), ('password', passwd), (
'entry', 'mwei'), ('client_id', ''), ('savestate', 1), ('ec', ''), (
'pagerefer',
'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F'
)])
req = request.Request('https://chenshuaijun.com')
req.add_header('Host', 'chenshuaijun.com')
req.add_header('User-Agent',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'
)
with request.urlopen(req, data=login_data.encode('utf-8')) as f:
print('Status:', f.status, f.reason)
for k, v in f.getheaders():
print('%s: %s' % (k, v))
print('Data:', f.read().decode('utf-8'))
<|reserved_special_token_1|>
from urllib import request, parse
# Interactive demo: collect credentials from stdin, POST a Weibo-style
# login form, then dump the HTTP status, response headers, and body.
print('Login to weibo.com')
email = input('Email:')
passwd = input('Password:')
# URL-encode the form fields expected by the mobile Weibo sign-in endpoint.
login_data = parse.urlencode([('username', email), ('password', passwd), (
    'entry', 'mwei'), ('client_id', ''), ('savestate', 1), ('ec', ''), (
    'pagerefer',
    'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F'
    )])
# NOTE(review): despite the banner text, the POST targets chenshuaijun.com —
# confirm whether that host is a stand-in for testing.
req = request.Request('https://chenshuaijun.com')
req.add_header('Host', 'chenshuaijun.com')
# Present a desktop Chrome User-Agent header.
req.add_header('User-Agent',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'
    )
# Send the urlencoded body and echo everything the server returns.
with request.urlopen(req, data=login_data.encode('utf-8')) as f:
    print('Status:', f.status, f.reason)
    for k, v in f.getheaders():
        print('%s: %s' % (k, v))
    print('Data:', f.read().decode('utf-8'))
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from urllib import request,parse
# req = request.Request('https://api.douban.com/v2/book/2129650')
# req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36')
# with request.urlopen(req) as f:
# data = f.read()
# print('Status:', f.status, f.reason)
# for k, v in f.getheaders():
# print('%s:%s' % (k, v))
# print('Data:', data.decode('utf-8'))
print('Login to weibo.com')
email = input('Email:')
passwd = input('Password:')
login_data = parse.urlencode([
('username', email),
('password', passwd),
('entry', 'mwei'),
('client_id', ''),
('savestate', 1),
('ec', ''),
('pagerefer', 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F')
])
req = request.Request('https://chenshuaijun.com')
req.add_header('Host', 'chenshuaijun.com')
req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36')
with request.urlopen(req, data=login_data.encode('utf-8')) as f:
print('Status:', f.status, f.reason)
for k, v in f.getheaders():
print('%s: %s' % (k, v))
print('Data:', f.read().decode('utf-8'))
|
flexible
|
{
"blob_id": "9bd63181de024c2f4517defa9ed51bdbc8d610d2",
"index": 6025,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Login to weibo.com')\n<mask token>\nreq.add_header('Host', 'chenshuaijun.com')\nreq.add_header('User-Agent',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'\n )\nwith request.urlopen(req, data=login_data.encode('utf-8')) as f:\n print('Status:', f.status, f.reason)\n for k, v in f.getheaders():\n print('%s: %s' % (k, v))\n print('Data:', f.read().decode('utf-8'))\n",
"step-3": "<mask token>\nprint('Login to weibo.com')\nemail = input('Email:')\npasswd = input('Password:')\nlogin_data = parse.urlencode([('username', email), ('password', passwd), (\n 'entry', 'mwei'), ('client_id', ''), ('savestate', 1), ('ec', ''), (\n 'pagerefer',\n 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F'\n )])\nreq = request.Request('https://chenshuaijun.com')\nreq.add_header('Host', 'chenshuaijun.com')\nreq.add_header('User-Agent',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'\n )\nwith request.urlopen(req, data=login_data.encode('utf-8')) as f:\n print('Status:', f.status, f.reason)\n for k, v in f.getheaders():\n print('%s: %s' % (k, v))\n print('Data:', f.read().decode('utf-8'))\n",
"step-4": "from urllib import request, parse\nprint('Login to weibo.com')\nemail = input('Email:')\npasswd = input('Password:')\nlogin_data = parse.urlencode([('username', email), ('password', passwd), (\n 'entry', 'mwei'), ('client_id', ''), ('savestate', 1), ('ec', ''), (\n 'pagerefer',\n 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F'\n )])\nreq = request.Request('https://chenshuaijun.com')\nreq.add_header('Host', 'chenshuaijun.com')\nreq.add_header('User-Agent',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36'\n )\nwith request.urlopen(req, data=login_data.encode('utf-8')) as f:\n print('Status:', f.status, f.reason)\n for k, v in f.getheaders():\n print('%s: %s' % (k, v))\n print('Data:', f.read().decode('utf-8'))\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom urllib import request,parse\n\n# req = request.Request('https://api.douban.com/v2/book/2129650')\n# req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36')\n# with request.urlopen(req) as f:\n# data = f.read()\n# print('Status:', f.status, f.reason)\n# for k, v in f.getheaders():\n# print('%s:%s' % (k, v))\n# print('Data:', data.decode('utf-8'))\n\nprint('Login to weibo.com')\nemail = input('Email:')\npasswd = input('Password:')\nlogin_data = parse.urlencode([\n ('username', email),\n ('password', passwd),\n ('entry', 'mwei'),\n ('client_id', ''),\n ('savestate', 1),\n ('ec', ''),\n ('pagerefer', 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F')\n])\n\nreq = request.Request('https://chenshuaijun.com')\nreq.add_header('Host', 'chenshuaijun.com')\nreq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36')\n\nwith request.urlopen(req, data=login_data.encode('utf-8')) as f:\n print('Status:', f.status, f.reason)\n for k, v in f.getheaders():\n print('%s: %s' % (k, v))\n print('Data:', f.read().decode('utf-8'))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import random
import sys
import numpy
from gensim import corpora
from coherence.wn import WordNetEvaluator
from topic.topic import Topic
from nltk.corpus import wordnet as wn
from nltk.corpus import reuters
from nltk.corpus import brown
# Python 2 script (print statements): estimates a random-word baseline for
# WordNet-based topic coherence over a previously built gensim dictionary.
# python random_tc.py <dname> <word_count> <sample_times> <output>
# <word_count>: the number of words that need to be randomly generated
# <sample_times>: the repetition times of the topic coherence calculation
# --- parse positional CLI arguments, falling back to defaults ---
if len(sys.argv) <= 1:
    dname = "reuters_LDA"
else:
    dname = sys.argv[1]
if len(sys.argv) <= 2:
    word_count = 10
else:
    word_count = int(sys.argv[2])
if len(sys.argv) <= 3:
    sample_times = 5
else:
    sample_times = int(sys.argv[3])
if len(sys.argv) <= 4:
    tcmethod = "path"  # WordNet similarity measure name
else:
    tcmethod = sys.argv[4]
    print tcmethod
if len(sys.argv) <= 5:
    ic = False
else:
    # "ic" switches to the information-content based evaluation path.
    if sys.argv[5] == "ic":
        ic = True
    else:
        ic = False
dictionary = corpora.Dictionary.load(dname + "/dict.dict")
print "Load dictionary",
print dictionary
corpus_fname = dname + '/bow_corpus.mm'
print "Load Corpus File " + corpus_fname
corpus = corpora.MmCorpus(corpus_fname)
# transfer each doc in the corpus into a dictionary
corpus_dict = []
for doc in corpus:
    corpus_dict.append(dict(doc))
dictlen = len(dictionary)
tc = WordNetEvaluator()
tc_means = []  # mean coherence per sampling round
tc_medians = []  # median coherence per sampling round
words_list = []  # the sampled word lists, echoed into the report files
# NOTE(review): these handles are never closed; consider `with open(...)`.
ofilemean = open(dname + "/"+tcmethod+"_mean_rand_"+str(word_count)+".txt", "w")
ofilemedian = open(dname + "/"+tcmethod+"_median_rand_"+str(word_count)+".txt", "w")
if ic:
    # Build the information-content table from the matching source corpus.
    if dname == "reuters_LDA":
        src_ic = wn.ic(reuters, False, 0.0)
    else:
        src_ic = wn.ic(brown, False, 0.0)
for i in range(sample_times):
    random_words = []
    # generate random numbers
    for n in range(word_count):
        # NOTE(review): the first draw uses randint(1, ...) but re-draws use
        # randint(0, ...), so id 0 is reachable only after a collision —
        # confirm whether id 0 should be excluded entirely.
        word = random.randint(1, dictlen-1)
        while word in random_words:
            word = random.randint(0, dictlen-1)
        random_words.append(word)
    keylist = []
    for key in random_words:
        keylist.append(dictionary[key])
    words_list.append(keylist)
    # Build a pseudo-topic with a uniform dummy weight for every word.
    randt = Topic()
    for key in keylist:
        randt.add((key, 0.1))
    # calculate topic coherence based on randomly generated words
    if ic:
        result = tc.evaluate_ic(randt, word_count, src_ic, tcmethod, not_write=True)
    else:
        result = tc.evaluate(randt, word_count, tcmethod, not_write=True)
    # Clamp NaN / runaway scores to 0.0 so they do not skew the summary.
    if (not numpy.isnan(result[1])) and result[1] < 10000:
        rmean = result[1]
    else:
        rmean = 0.0
    # NOTE(review): this branch tests result[1] < 10000 while assigning
    # result[2] — probably intended to test result[2] < 10000.
    if (not numpy.isnan(result[2])) and result[1] < 10000:
        rmedian = result[2]
    else:
        rmedian = 0.0
    tc_means.append(rmean)
    tc_medians.append(rmedian)
# --- write summary (mean/SD) plus per-round values and sampled words ---
ofilemean.write("Mean: " + str(numpy.mean(tc_means)) + "\n")
ofilemean.write("SD: " + str(numpy.std(tc_means)) + "\n\n")
for item in tc_means:
    ofilemean.write(str(item) + "\n")
for item in words_list:
    ofilemean.write(str(item) + "\n")
ofilemedian.write("Mean: " + str(numpy.mean(tc_medians)) + "\n")
ofilemedian.write("SD: " + str(numpy.std(tc_medians)) + "\n\n")
for item in tc_medians:
    ofilemedian.write(str(item) + "\n")
for item in words_list:
    ofilemedian.write(str(item) + "\n")
|
normal
|
{
"blob_id": "2d7e3a70f1c25bbc7ad5eafa006ab12c978eaec4",
"index": 1115,
"step-1": "import random\nimport sys\n\nimport numpy\nfrom gensim import corpora\n\nfrom coherence.wn import WordNetEvaluator\nfrom topic.topic import Topic\nfrom nltk.corpus import wordnet as wn\nfrom nltk.corpus import reuters\nfrom nltk.corpus import brown\n# python random_tc.py <dname> <word_count> <sample_times> <output>\n# <word_count>: the number of words that need to be randomly generated\n# <sample_times>: the repetition times of the topic coherence calculation\n\nif len(sys.argv) <= 1:\n dname = \"reuters_LDA\"\nelse:\n dname = sys.argv[1]\n\nif len(sys.argv) <= 2:\n word_count = 10\nelse:\n word_count = int(sys.argv[2])\n\nif len(sys.argv) <= 3:\n sample_times = 5\nelse:\n sample_times = int(sys.argv[3])\n\nif len(sys.argv) <= 4:\n tcmethod = \"path\"\nelse:\n tcmethod = sys.argv[4]\n print tcmethod\n\nif len(sys.argv) <= 5:\n ic = False\nelse:\n if sys.argv[5] == \"ic\":\n ic = True\n else:\n ic = False\n\n\ndictionary = corpora.Dictionary.load(dname + \"/dict.dict\")\nprint \"Load dictionary\",\nprint dictionary\ncorpus_fname = dname + '/bow_corpus.mm'\nprint \"Load Corpus File \" + corpus_fname\ncorpus = corpora.MmCorpus(corpus_fname)\n\n# transfer each doc in the corpus into a dictionary\ncorpus_dict = []\nfor doc in corpus:\n corpus_dict.append(dict(doc))\ndictlen = len(dictionary)\n\ntc = WordNetEvaluator()\n\ntc_means = []\ntc_medians = []\nwords_list = []\n\nofilemean = open(dname + \"/\"+tcmethod+\"_mean_rand_\"+str(word_count)+\".txt\", \"w\")\nofilemedian = open(dname + \"/\"+tcmethod+\"_median_rand_\"+str(word_count)+\".txt\", \"w\")\n\nif ic:\n if dname == \"reuters_LDA\":\n src_ic = wn.ic(reuters, False, 0.0)\n else:\n src_ic = wn.ic(brown, False, 0.0)\n\n\n\nfor i in range(sample_times):\n random_words = []\n # generate random numbers\n for n in range(word_count):\n word = random.randint(1, dictlen-1)\n while word in random_words:\n word = random.randint(0, dictlen-1)\n random_words.append(word)\n\n keylist = []\n for key in 
random_words:\n keylist.append(dictionary[key])\n words_list.append(keylist)\n\n randt = Topic()\n for key in keylist:\n randt.add((key, 0.1))\n\n # calculate topic coherence based on randomly generated words\n if ic:\n result = tc.evaluate_ic(randt, word_count, src_ic, tcmethod, not_write=True)\n else:\n result = tc.evaluate(randt, word_count, tcmethod, not_write=True)\n\n if (not numpy.isnan(result[1])) and result[1] < 10000:\n rmean = result[1]\n else:\n rmean = 0.0\n\n if (not numpy.isnan(result[2])) and result[1] < 10000:\n rmedian = result[2]\n else:\n rmedian = 0.0\n \n tc_means.append(rmean)\n tc_medians.append(rmedian)\n\nofilemean.write(\"Mean: \" + str(numpy.mean(tc_means)) + \"\\n\")\nofilemean.write(\"SD: \" + str(numpy.std(tc_means)) + \"\\n\\n\")\nfor item in tc_means:\n ofilemean.write(str(item) + \"\\n\")\n\nfor item in words_list:\n ofilemean.write(str(item) + \"\\n\")\n\nofilemedian.write(\"Mean: \" + str(numpy.mean(tc_medians)) + \"\\n\")\nofilemedian.write(\"SD: \" + str(numpy.std(tc_medians)) + \"\\n\\n\")\nfor item in tc_medians:\n ofilemedian.write(str(item) + \"\\n\")\n\nfor item in words_list:\n ofilemedian.write(str(item) + \"\\n\")\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class ScheduleAdmin(admin.ModelAdmin):
"""model admin for schedules"""
list_display = ('id', 'name', 'func', 'schedule_type', 'repeats',
'cluster', 'next_run', 'last_run', 'success')
if not croniter:
readonly_fields = 'cron',
list_filter = 'next_run', 'schedule_type', 'cluster'
search_fields = 'func',
list_display_links = 'id', 'name'
class QueueAdmin(admin.ModelAdmin):
"""queue admin for ORM broker"""
list_display = 'id', 'key', 'task_id', 'name', 'func', 'lock'
def save_model(self, request, obj, form, change):
obj.save(using=Conf.ORM)
def delete_model(self, request, obj):
obj.delete(using=Conf.ORM)
def get_queryset(self, request):
return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM)
def has_add_permission(self, request):
"""Don't allow adds."""
return False
list_filter = 'key',
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TaskAdmin(admin.ModelAdmin):
"""model admin for success tasks."""
list_display = 'name', 'func', 'started', 'stopped', 'time_taken', 'group'
def has_add_permission(self, request):
"""Don't allow adds."""
return False
def get_queryset(self, request):
"""Only show successes."""
qs = super(TaskAdmin, self).get_queryset(request)
return qs.filter(success=True)
search_fields = 'name', 'func', 'group'
readonly_fields = []
list_filter = 'group',
def get_readonly_fields(self, request, obj=None):
"""Set all fields readonly."""
return list(self.readonly_fields) + [field.name for field in obj.
_meta.fields]
<|reserved_special_token_0|>
class FailAdmin(admin.ModelAdmin):
"""model admin for failed tasks."""
list_display = 'name', 'func', 'started', 'stopped', 'short_result'
def has_add_permission(self, request):
"""Don't allow adds."""
return False
actions = [retry_failed]
search_fields = 'name', 'func'
list_filter = 'group',
readonly_fields = []
def get_readonly_fields(self, request, obj=None):
"""Set all fields readonly."""
return list(self.readonly_fields) + [field.name for field in obj.
_meta.fields]
class ScheduleAdmin(admin.ModelAdmin):
"""model admin for schedules"""
list_display = ('id', 'name', 'func', 'schedule_type', 'repeats',
'cluster', 'next_run', 'last_run', 'success')
if not croniter:
readonly_fields = 'cron',
list_filter = 'next_run', 'schedule_type', 'cluster'
search_fields = 'func',
list_display_links = 'id', 'name'
class QueueAdmin(admin.ModelAdmin):
"""queue admin for ORM broker"""
list_display = 'id', 'key', 'task_id', 'name', 'func', 'lock'
def save_model(self, request, obj, form, change):
obj.save(using=Conf.ORM)
def delete_model(self, request, obj):
obj.delete(using=Conf.ORM)
def get_queryset(self, request):
return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM)
def has_add_permission(self, request):
"""Don't allow adds."""
return False
list_filter = 'key',
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TaskAdmin(admin.ModelAdmin):
"""model admin for success tasks."""
list_display = 'name', 'func', 'started', 'stopped', 'time_taken', 'group'
def has_add_permission(self, request):
"""Don't allow adds."""
return False
def get_queryset(self, request):
"""Only show successes."""
qs = super(TaskAdmin, self).get_queryset(request)
return qs.filter(success=True)
search_fields = 'name', 'func', 'group'
readonly_fields = []
list_filter = 'group',
def get_readonly_fields(self, request, obj=None):
"""Set all fields readonly."""
return list(self.readonly_fields) + [field.name for field in obj.
_meta.fields]
def retry_failed(FailAdmin, request, queryset):
"""Submit selected tasks back to the queue."""
for task in queryset:
async_task(task.func, *(task.args or ()), hook=task.hook, **task.
kwargs or {})
task.delete()
<|reserved_special_token_0|>
class FailAdmin(admin.ModelAdmin):
"""model admin for failed tasks."""
list_display = 'name', 'func', 'started', 'stopped', 'short_result'
def has_add_permission(self, request):
"""Don't allow adds."""
return False
actions = [retry_failed]
search_fields = 'name', 'func'
list_filter = 'group',
readonly_fields = []
def get_readonly_fields(self, request, obj=None):
"""Set all fields readonly."""
return list(self.readonly_fields) + [field.name for field in obj.
_meta.fields]
class ScheduleAdmin(admin.ModelAdmin):
"""model admin for schedules"""
list_display = ('id', 'name', 'func', 'schedule_type', 'repeats',
'cluster', 'next_run', 'last_run', 'success')
if not croniter:
readonly_fields = 'cron',
list_filter = 'next_run', 'schedule_type', 'cluster'
search_fields = 'func',
list_display_links = 'id', 'name'
class QueueAdmin(admin.ModelAdmin):
"""queue admin for ORM broker"""
list_display = 'id', 'key', 'task_id', 'name', 'func', 'lock'
def save_model(self, request, obj, form, change):
obj.save(using=Conf.ORM)
def delete_model(self, request, obj):
obj.delete(using=Conf.ORM)
def get_queryset(self, request):
return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM)
def has_add_permission(self, request):
"""Don't allow adds."""
return False
list_filter = 'key',
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TaskAdmin(admin.ModelAdmin):
"""model admin for success tasks."""
list_display = 'name', 'func', 'started', 'stopped', 'time_taken', 'group'
def has_add_permission(self, request):
"""Don't allow adds."""
return False
def get_queryset(self, request):
"""Only show successes."""
qs = super(TaskAdmin, self).get_queryset(request)
return qs.filter(success=True)
search_fields = 'name', 'func', 'group'
readonly_fields = []
list_filter = 'group',
def get_readonly_fields(self, request, obj=None):
"""Set all fields readonly."""
return list(self.readonly_fields) + [field.name for field in obj.
_meta.fields]
def retry_failed(FailAdmin, request, queryset):
"""Submit selected tasks back to the queue."""
for task in queryset:
async_task(task.func, *(task.args or ()), hook=task.hook, **task.
kwargs or {})
task.delete()
<|reserved_special_token_0|>
class FailAdmin(admin.ModelAdmin):
"""model admin for failed tasks."""
list_display = 'name', 'func', 'started', 'stopped', 'short_result'
def has_add_permission(self, request):
"""Don't allow adds."""
return False
actions = [retry_failed]
search_fields = 'name', 'func'
list_filter = 'group',
readonly_fields = []
def get_readonly_fields(self, request, obj=None):
"""Set all fields readonly."""
return list(self.readonly_fields) + [field.name for field in obj.
_meta.fields]
class ScheduleAdmin(admin.ModelAdmin):
    """Admin for task schedules (recurring / future task definitions)."""
    list_display = ('id', 'name', 'func', 'schedule_type', 'repeats',
        'cluster', 'next_run', 'last_run', 'success')
    # The cron field is read-only when the optional croniter package is not
    # installed (presumably because cron expressions cannot be parsed then —
    # TODO confirm).
    if not croniter:
        readonly_fields = 'cron',
    list_filter = 'next_run', 'schedule_type', 'cluster'
    search_fields = 'func',
    list_display_links = 'id', 'name'
class QueueAdmin(admin.ModelAdmin):
    """Admin for queued tasks when the ORM broker is in use.

    All reads and writes are routed through the Conf.ORM database alias.
    """
    list_display = 'id', 'key', 'task_id', 'name', 'func', 'lock'
    def save_model(self, request, obj, form, change):
        """Save through the ORM broker's database alias."""
        obj.save(using=Conf.ORM)
    def delete_model(self, request, obj):
        """Delete through the ORM broker's database alias."""
        obj.delete(using=Conf.ORM)
    def get_queryset(self, request):
        """Read queued tasks from the ORM broker's database alias."""
        return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM)
    def has_add_permission(self, request):
        """Don't allow adds."""
        return False
    list_filter = 'key',
admin.site.register(Schedule, ScheduleAdmin)
admin.site.register(Success, TaskAdmin)
admin.site.register(Failure, FailAdmin)
if Conf.ORM or Conf.TESTING:
admin.site.register(OrmQ, QueueAdmin)
<|reserved_special_token_1|>
"""Admin module for Django."""
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from django_q.conf import Conf, croniter
from django_q.models import Failure, OrmQ, Schedule, Success
from django_q.tasks import async_task
class TaskAdmin(admin.ModelAdmin):
    """Admin configuration for successful task records."""

    list_display = ("name", "func", "started", "stopped", "time_taken", "group")
    search_fields = ("name", "func", "group")
    list_filter = ("group",)
    readonly_fields = []

    def has_add_permission(self, request):
        """Successes are created by the cluster, never by hand."""
        return False

    def get_queryset(self, request):
        """Restrict the changelist to successful tasks."""
        return super().get_queryset(request).filter(success=True)

    def get_readonly_fields(self, request, obj=None):
        """Make every model field read-only in the change view."""
        field_names = [field.name for field in obj._meta.fields]
        return list(self.readonly_fields) + field_names
def retry_failed(modeladmin, request, queryset):
    """Admin action: resubmit each selected failed task to the queue.

    Re-enqueues every task with its original function, positional args,
    hook and keyword args, then deletes the failure record.

    The first parameter was named ``FailAdmin``, shadowing the admin class
    defined below; renamed to the conventional ``modeladmin`` (Django calls
    admin actions positionally, so the rename is backward-compatible).
    """
    for task in queryset:
        async_task(task.func, *task.args or (), hook=task.hook, **task.kwargs or {})
        task.delete()


retry_failed.short_description = _("Resubmit selected tasks to queue")
class FailAdmin(admin.ModelAdmin):
    """Admin configuration for failed task records."""

    list_display = ("name", "func", "started", "stopped", "short_result")
    search_fields = ("name", "func")
    list_filter = ("group",)
    readonly_fields = []
    actions = [retry_failed]

    def has_add_permission(self, request):
        """Failures are created by the cluster, never by hand."""
        return False

    def get_readonly_fields(self, request, obj=None):
        """Make every model field read-only in the change view."""
        field_names = [field.name for field in obj._meta.fields]
        return list(self.readonly_fields) + field_names
class ScheduleAdmin(admin.ModelAdmin):
    """Admin configuration for schedules."""

    list_display = (
        "id", "name", "func", "schedule_type", "repeats",
        "cluster", "next_run", "last_run", "success",
    )
    list_display_links = ("id", "name")
    list_filter = ("next_run", "schedule_type", "cluster")
    search_fields = ("func",)

    # Without croniter installed, cron expressions cannot be validated,
    # so the field is exposed read-only.
    if not croniter:
        readonly_fields = ("cron",)
class QueueAdmin(admin.ModelAdmin):
    """Admin configuration for queued packages of the ORM broker."""

    list_display = ("id", "key", "task_id", "name", "func", "lock")
    list_filter = ("key",)

    def has_add_permission(self, request):
        """Queue entries are created by the broker, never by hand."""
        return False

    def save_model(self, request, obj, form, change):
        """Persist on the database configured for the ORM broker."""
        obj.save(using=Conf.ORM)

    def delete_model(self, request, obj):
        """Delete from the database configured for the ORM broker."""
        obj.delete(using=Conf.ORM)

    def get_queryset(self, request):
        """Read queue entries from the database configured for the ORM broker."""
        return super().get_queryset(request).using(Conf.ORM)
# Register the django-q bookkeeping models with the default admin site.
admin.site.register(Schedule, ScheduleAdmin)
admin.site.register(Success, TaskAdmin)
admin.site.register(Failure, FailAdmin)
# The queue admin is only meaningful when the ORM broker is configured
# (or while running the test suite).
if Conf.ORM or Conf.TESTING:
    admin.site.register(OrmQ, QueueAdmin)
|
flexible
|
{
"blob_id": "5aebebb7f22e094a1a897b3266ff07d59400b76c",
"index": 2209,
"step-1": "<mask token>\n\n\nclass ScheduleAdmin(admin.ModelAdmin):\n \"\"\"model admin for schedules\"\"\"\n list_display = ('id', 'name', 'func', 'schedule_type', 'repeats',\n 'cluster', 'next_run', 'last_run', 'success')\n if not croniter:\n readonly_fields = 'cron',\n list_filter = 'next_run', 'schedule_type', 'cluster'\n search_fields = 'func',\n list_display_links = 'id', 'name'\n\n\nclass QueueAdmin(admin.ModelAdmin):\n \"\"\"queue admin for ORM broker\"\"\"\n list_display = 'id', 'key', 'task_id', 'name', 'func', 'lock'\n\n def save_model(self, request, obj, form, change):\n obj.save(using=Conf.ORM)\n\n def delete_model(self, request, obj):\n obj.delete(using=Conf.ORM)\n\n def get_queryset(self, request):\n return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM)\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n list_filter = 'key',\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TaskAdmin(admin.ModelAdmin):\n \"\"\"model admin for success tasks.\"\"\"\n list_display = 'name', 'func', 'started', 'stopped', 'time_taken', 'group'\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n\n def get_queryset(self, request):\n \"\"\"Only show successes.\"\"\"\n qs = super(TaskAdmin, self).get_queryset(request)\n return qs.filter(success=True)\n search_fields = 'name', 'func', 'group'\n readonly_fields = []\n list_filter = 'group',\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj.\n _meta.fields]\n\n\n<mask token>\n\n\nclass FailAdmin(admin.ModelAdmin):\n \"\"\"model admin for failed tasks.\"\"\"\n list_display = 'name', 'func', 'started', 'stopped', 'short_result'\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n actions = [retry_failed]\n search_fields = 'name', 'func'\n list_filter = 'group',\n readonly_fields = []\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj.\n _meta.fields]\n\n\nclass ScheduleAdmin(admin.ModelAdmin):\n \"\"\"model admin for schedules\"\"\"\n list_display = ('id', 'name', 'func', 'schedule_type', 'repeats',\n 'cluster', 'next_run', 'last_run', 'success')\n if not croniter:\n readonly_fields = 'cron',\n list_filter = 'next_run', 'schedule_type', 'cluster'\n search_fields = 'func',\n list_display_links = 'id', 'name'\n\n\nclass QueueAdmin(admin.ModelAdmin):\n \"\"\"queue admin for ORM broker\"\"\"\n list_display = 'id', 'key', 'task_id', 'name', 'func', 'lock'\n\n def save_model(self, request, obj, form, change):\n obj.save(using=Conf.ORM)\n\n def delete_model(self, request, obj):\n obj.delete(using=Conf.ORM)\n\n def get_queryset(self, request):\n return super(QueueAdmin, 
self).get_queryset(request).using(Conf.ORM)\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n list_filter = 'key',\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TaskAdmin(admin.ModelAdmin):\n \"\"\"model admin for success tasks.\"\"\"\n list_display = 'name', 'func', 'started', 'stopped', 'time_taken', 'group'\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n\n def get_queryset(self, request):\n \"\"\"Only show successes.\"\"\"\n qs = super(TaskAdmin, self).get_queryset(request)\n return qs.filter(success=True)\n search_fields = 'name', 'func', 'group'\n readonly_fields = []\n list_filter = 'group',\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj.\n _meta.fields]\n\n\ndef retry_failed(FailAdmin, request, queryset):\n \"\"\"Submit selected tasks back to the queue.\"\"\"\n for task in queryset:\n async_task(task.func, *(task.args or ()), hook=task.hook, **task.\n kwargs or {})\n task.delete()\n\n\n<mask token>\n\n\nclass FailAdmin(admin.ModelAdmin):\n \"\"\"model admin for failed tasks.\"\"\"\n list_display = 'name', 'func', 'started', 'stopped', 'short_result'\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n actions = [retry_failed]\n search_fields = 'name', 'func'\n list_filter = 'group',\n readonly_fields = []\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj.\n _meta.fields]\n\n\nclass ScheduleAdmin(admin.ModelAdmin):\n \"\"\"model admin for schedules\"\"\"\n list_display = ('id', 'name', 'func', 'schedule_type', 'repeats',\n 'cluster', 'next_run', 'last_run', 'success')\n if not croniter:\n readonly_fields = 'cron',\n list_filter = 'next_run', 'schedule_type', 'cluster'\n search_fields = 'func',\n list_display_links = 'id', 'name'\n\n\nclass QueueAdmin(admin.ModelAdmin):\n \"\"\"queue admin for ORM broker\"\"\"\n list_display = 'id', 'key', 'task_id', 'name', 'func', 'lock'\n\n def 
save_model(self, request, obj, form, change):\n obj.save(using=Conf.ORM)\n\n def delete_model(self, request, obj):\n obj.delete(using=Conf.ORM)\n\n def get_queryset(self, request):\n return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM)\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n list_filter = 'key',\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TaskAdmin(admin.ModelAdmin):\n \"\"\"model admin for success tasks.\"\"\"\n list_display = 'name', 'func', 'started', 'stopped', 'time_taken', 'group'\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n\n def get_queryset(self, request):\n \"\"\"Only show successes.\"\"\"\n qs = super(TaskAdmin, self).get_queryset(request)\n return qs.filter(success=True)\n search_fields = 'name', 'func', 'group'\n readonly_fields = []\n list_filter = 'group',\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj.\n _meta.fields]\n\n\ndef retry_failed(FailAdmin, request, queryset):\n \"\"\"Submit selected tasks back to the queue.\"\"\"\n for task in queryset:\n async_task(task.func, *(task.args or ()), hook=task.hook, **task.\n kwargs or {})\n task.delete()\n\n\n<mask token>\n\n\nclass FailAdmin(admin.ModelAdmin):\n \"\"\"model admin for failed tasks.\"\"\"\n list_display = 'name', 'func', 'started', 'stopped', 'short_result'\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n actions = [retry_failed]\n search_fields = 'name', 'func'\n list_filter = 'group',\n readonly_fields = []\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj.\n _meta.fields]\n\n\nclass ScheduleAdmin(admin.ModelAdmin):\n \"\"\"model admin for schedules\"\"\"\n list_display = ('id', 'name', 'func', 'schedule_type', 'repeats',\n 'cluster', 'next_run', 'last_run', 'success')\n if not croniter:\n readonly_fields = 'cron',\n list_filter = 'next_run', 'schedule_type', 'cluster'\n search_fields = 'func',\n list_display_links = 'id', 'name'\n\n\nclass QueueAdmin(admin.ModelAdmin):\n \"\"\"queue admin for ORM broker\"\"\"\n list_display = 'id', 'key', 'task_id', 'name', 'func', 'lock'\n\n def 
save_model(self, request, obj, form, change):\n obj.save(using=Conf.ORM)\n\n def delete_model(self, request, obj):\n obj.delete(using=Conf.ORM)\n\n def get_queryset(self, request):\n return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM)\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n list_filter = 'key',\n\n\nadmin.site.register(Schedule, ScheduleAdmin)\nadmin.site.register(Success, TaskAdmin)\nadmin.site.register(Failure, FailAdmin)\nif Conf.ORM or Conf.TESTING:\n admin.site.register(OrmQ, QueueAdmin)\n",
"step-5": "\"\"\"Admin module for Django.\"\"\"\nfrom django.contrib import admin\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django_q.conf import Conf, croniter\nfrom django_q.models import Failure, OrmQ, Schedule, Success\nfrom django_q.tasks import async_task\n\n\nclass TaskAdmin(admin.ModelAdmin):\n \"\"\"model admin for success tasks.\"\"\"\n\n list_display = (\"name\", \"func\", \"started\", \"stopped\", \"time_taken\", \"group\")\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n\n def get_queryset(self, request):\n \"\"\"Only show successes.\"\"\"\n qs = super(TaskAdmin, self).get_queryset(request)\n return qs.filter(success=True)\n\n search_fields = (\"name\", \"func\", \"group\")\n readonly_fields = []\n list_filter = (\"group\",)\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj._meta.fields]\n\n\ndef retry_failed(FailAdmin, request, queryset):\n \"\"\"Submit selected tasks back to the queue.\"\"\"\n for task in queryset:\n async_task(task.func, *task.args or (), hook=task.hook, **task.kwargs or {})\n task.delete()\n\n\nretry_failed.short_description = _(\"Resubmit selected tasks to queue\")\n\n\nclass FailAdmin(admin.ModelAdmin):\n \"\"\"model admin for failed tasks.\"\"\"\n\n list_display = (\"name\", \"func\", \"started\", \"stopped\", \"short_result\")\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n\n actions = [retry_failed]\n search_fields = (\"name\", \"func\")\n list_filter = (\"group\",)\n readonly_fields = []\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj._meta.fields]\n\n\nclass ScheduleAdmin(admin.ModelAdmin):\n \"\"\"model admin for schedules\"\"\"\n\n list_display = (\n \"id\",\n \"name\",\n \"func\",\n 
\"schedule_type\",\n \"repeats\",\n \"cluster\",\n \"next_run\",\n \"last_run\",\n \"success\",\n )\n\n # optional cron strings\n if not croniter:\n readonly_fields = (\"cron\",)\n\n list_filter = (\"next_run\", \"schedule_type\", \"cluster\")\n search_fields = (\"func\",)\n list_display_links = (\"id\", \"name\")\n\n\nclass QueueAdmin(admin.ModelAdmin):\n \"\"\"queue admin for ORM broker\"\"\"\n\n list_display = (\"id\", \"key\", \"task_id\", \"name\", \"func\", \"lock\")\n\n def save_model(self, request, obj, form, change):\n obj.save(using=Conf.ORM)\n\n def delete_model(self, request, obj):\n obj.delete(using=Conf.ORM)\n\n def get_queryset(self, request):\n return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM)\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n\n list_filter = (\"key\",)\n\n\nadmin.site.register(Schedule, ScheduleAdmin)\nadmin.site.register(Success, TaskAdmin)\nadmin.site.register(Failure, FailAdmin)\n\nif Conf.ORM or Conf.TESTING:\n admin.site.register(OrmQ, QueueAdmin)\n",
"step-ids": [
10,
21,
22,
23,
26
]
}
|
[
10,
21,
22,
23,
26
] |
from django import template
import ast
register = template.Library()
@register.simple_tag()
def multiplication(value, arg, *args, **kwargs):
    """Template tag: return ``value * arg``; extra args are accepted but ignored."""
    return value * arg
@register.filter
def in_category(things, category):
    """Template filter: return ``things.filter(category=category)``.

    Assumes ``things`` exposes a queryset-like ``.filter`` — confirm at call sites.
    """
    return things.filter(category=category)
@register.simple_tag()
def division(value, arg, *args, **kwargs):
    """Template tag: return ``value / arg``; extra args are accepted but ignored.

    Raises ZeroDivisionError when ``arg`` is 0.
    """
    return value / arg
@register.simple_tag()
def add(value, arg, *args, **kwargs):
    """Template tag: return ``value + arg``; extra args are accepted but ignored."""
    return value + arg
|
normal
|
{
"blob_id": "9339d3bc0c3005880b1c8d1c9914d6e28d39dbbd",
"index": 7285,
"step-1": "<mask token>\n\n\n@register.simple_tag()\ndef multiplication(value, arg, *args, **kwargs):\n return value * arg\n\n\n@register.filter\ndef in_category(things, category):\n return things.filter(category=category)\n\n\n@register.simple_tag()\ndef division(value, arg, *args, **kwargs):\n return value / arg\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@register.simple_tag()\ndef multiplication(value, arg, *args, **kwargs):\n return value * arg\n\n\n@register.filter\ndef in_category(things, category):\n return things.filter(category=category)\n\n\n@register.simple_tag()\ndef division(value, arg, *args, **kwargs):\n return value / arg\n\n\n@register.simple_tag()\ndef add(value, arg, *args, **kwargs):\n return value + arg\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\n@register.simple_tag()\ndef multiplication(value, arg, *args, **kwargs):\n return value * arg\n\n\n@register.filter\ndef in_category(things, category):\n return things.filter(category=category)\n\n\n@register.simple_tag()\ndef division(value, arg, *args, **kwargs):\n return value / arg\n\n\n@register.simple_tag()\ndef add(value, arg, *args, **kwargs):\n return value + arg\n",
"step-4": "from django import template\nimport ast\nregister = template.Library()\n\n\n@register.simple_tag()\ndef multiplication(value, arg, *args, **kwargs):\n return value * arg\n\n\n@register.filter\ndef in_category(things, category):\n return things.filter(category=category)\n\n\n@register.simple_tag()\ndef division(value, arg, *args, **kwargs):\n return value / arg\n\n\n@register.simple_tag()\ndef add(value, arg, *args, **kwargs):\n return value + arg\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
import os
from typing import List
from pypinyin import pinyin, lazy_pinyin
# Map each tone-key letter to the accented vowels of that tone
# (d = 1st tone, f = 2nd, j = 3rd, k = 4th; 'l' is returned for neutral tone).
toneMap = {
    # Fixed: the 1st-tone row listed 'ǜ' (4th-tone ü) instead of 'ǖ'
    # (1st-tone ü). With the typo, 'ǖ' syllables fell through to 'l' and
    # 'ǜ' syllables matched this row first instead of row 'k'.
    "d": ['ā', 'ē', 'ī', 'ō', 'ū', 'ǖ'],
    "f": ['á', 'é', 'í', 'ó', 'ú', 'ǘ'],
    "j": ['ǎ', 'ě', 'ǐ', 'ǒ', 'ǔ', 'ǚ'],
    "k": ['à', 'è', 'ì', 'ò', 'ù', 'ǜ'],
}

# word -> highest weight seen in the base dictionary (filled by getWeightMap).
weightMap = {}
def getWeightMap():
    """Fill ``weightMap`` with the highest weight per single-character word.

    Parses the base dictionary, whose rows are ``word<TAB>pinyin<TAB>weight``,
    and records for each one-character word the numerically largest weight,
    so tone-coded entries rank highest.
    """
    path = os.path.join(os.path.dirname(__file__), "../cells/cubor-base.dict.yaml")
    with open(path, "r", encoding='utf8') as base:
        for row in base:
            units = row.split('\t')
            # Skip blank lines and multi-character words: a usable row has
            # exactly three columns and a single-character word.
            if len(units) != 3 or len(units[0]) != 1:
                continue
            word = units[0]
            # rstrip('\n') instead of the original units[2][:-1]: chopping
            # the last character unconditionally corrupted the file's final
            # row when it lacks a trailing newline.
            weight = units[2].rstrip('\n')
            previous = weightMap.get(word)
            # Keep only the largest weight seen for this word.
            if previous is None or int(weight) > int(previous):
                weightMap[word] = weight
def flatten(a):
    """Yield the non-list leaves of an arbitrarily nested list, depth-first."""
    for item in a:
        if isinstance(item, list):
            # Recurse into sublists; anything that is not a list is a leaf.
            yield from flatten(item)
        else:
            yield item
def getPinyins(c: List[List[str]]) -> list:
    """Return a flat list of all heteronym readings produced by pypinyin."""
    return list(flatten(pinyin(c, heteronym=True)))
def getToneKeys(tone: str) -> str:
    """Map an accented pinyin syllable to its tone-key letter.

    Returns the key of the first ``toneMap`` row whose accented vowel occurs
    in *tone*, or ``'l'`` (neutral tone) when no accented vowel is found.
    """
    for toneKey, vowels in toneMap.items():
        # `vowel in tone` instead of the dunder call tone.__contains__(vowel).
        if any(vowel in tone for vowel in vowels):
            return toneKey
    return 'l'
def readSingleCharYaml():
    """Append tone-coded single-character entries to the base dictionary.

    Reads ``word<TAB>pinyin<TAB>...`` rows from the single-character table,
    derives the word's tone-key letter(s), and appends
    ``word<TAB>pinyin+toneKey<TAB>weight`` rows to the base dictionary
    (deployment mode; a rebuild mode emitting ``word<TAB>toneKey`` rows
    existed previously as commented-out code).
    """
    retlines = []
    with open(os.path.join(os.path.dirname(__file__),
              "../cubor/cubor-single.dict.yaml"), "r", encoding='utf8') as yaml:
        line = yaml.readline()
        # `while line:` replaces `while len(line) is not 0:` — identity
        # comparison against an int literal relies on CPython small-int
        # caching and raises a SyntaxWarning on modern Python.
        while line:
            chars = line.split('\t')
            if len(chars) == 3:  # was `is 3`, same identity-test problem
                word, srcPy = chars[0], chars[1]
                tones = getPinyins(pinyin(word, heteronym=True))

                tempToneKeys = []
                for tone in tones:
                    toneKey = getToneKeys(tone)
                    # Deduplicate: heteronyms like 啊 (a/e, both tone 1)
                    # should contribute a key only once.
                    if toneKey not in tempToneKeys:
                        tempToneKeys.append(toneKey)

                # NOTE(review): only the toneKey of the LAST tone is used
                # below; tempToneKeys is collected but never consumed.
                # Behavior preserved as-is — confirm whether one output row
                # per distinct tone key was intended.

                # Words missing from the simplified-character weight map are
                # low-frequency traditional forms; give them weight 0.
                weight = weightMap.get(word) if weightMap.get(word) is not None else '0'

                retlines.append(f'{word}\t{srcPy}{toneKey}\t{weight}\r\n')
            line = yaml.readline()

    with open(os.path.join(os.path.dirname(__file__),
              "../cells/cubor-base.dict.yaml"), "a", encoding='utf8') as writer:
        writer.writelines(retlines)
if __name__ == '__main__':
    print('[INFO] - 构建库珀输入法 声调表...')
    getWeightMap()  # build the per-character weight table first
    readSingleCharYaml()  # then append the tone-coded entries
    print('[INFO] - 构建完成!')
|
normal
|
{
"blob_id": "e50c1ef7368aabf53bc0cfd45e19101fa1519a1f",
"index": 6245,
"step-1": "<mask token>\n\n\ndef getWeightMap():\n with open(os.path.join(os.path.dirname(__file__),\n '../cells/cubor-base.dict.yaml'), 'r', encoding='utf8') as base:\n _lines = base.readlines()\n for wunit in _lines:\n units = wunit.split('\\t')\n if len(units) == 3 and len(units[0]) == 1:\n units[2] = units[2][:-1]\n if weightMap.get(units[0]) is None:\n weightMap.__setitem__(units[0], units[2])\n elif int(units[2]) > int(weightMap.get(units[0])):\n weightMap.__setitem__(units[0], units[2])\n\n\ndef flatten(a):\n for each in a:\n if not isinstance(each, list):\n yield each\n else:\n yield from flatten(each)\n\n\ndef getPinyins(c: List[List[str]]) ->list:\n return list(flatten(pinyin(c, heteronym=True)))\n\n\ndef getToneKeys(tone: str) ->str:\n for toneKey, toneList in toneMap.items():\n for toneChar in toneList:\n if tone.__contains__(toneChar):\n return toneKey\n return 'l'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getWeightMap():\n with open(os.path.join(os.path.dirname(__file__),\n '../cells/cubor-base.dict.yaml'), 'r', encoding='utf8') as base:\n _lines = base.readlines()\n for wunit in _lines:\n units = wunit.split('\\t')\n if len(units) == 3 and len(units[0]) == 1:\n units[2] = units[2][:-1]\n if weightMap.get(units[0]) is None:\n weightMap.__setitem__(units[0], units[2])\n elif int(units[2]) > int(weightMap.get(units[0])):\n weightMap.__setitem__(units[0], units[2])\n\n\ndef flatten(a):\n for each in a:\n if not isinstance(each, list):\n yield each\n else:\n yield from flatten(each)\n\n\ndef getPinyins(c: List[List[str]]) ->list:\n return list(flatten(pinyin(c, heteronym=True)))\n\n\ndef getToneKeys(tone: str) ->str:\n for toneKey, toneList in toneMap.items():\n for toneChar in toneList:\n if tone.__contains__(toneChar):\n return toneKey\n return 'l'\n\n\ndef readSingleCharYaml():\n retlines = []\n with open(os.path.join(os.path.dirname(__file__),\n '../cubor/cubor-single.dict.yaml'), 'r', encoding='utf8') as yaml:\n line = yaml.readline()\n while len(line) is not 0:\n chars = line.split('\\t')\n if len(chars) is 3:\n word, srcPy = chars[0], chars[1]\n tones = getPinyins(pinyin(word, heteronym=True))\n tempToneKeys = []\n for tone in tones:\n toneKey = getToneKeys(tone)\n if toneKey not in tempToneKeys:\n tempToneKeys.append(toneKey)\n weight = weightMap.get(word) if weightMap.get(word\n ) is not None else '0'\n line = f'{word}\\t{srcPy}{toneKey}\\t{weight}\\r\\n'\n retlines.append(line)\n line = yaml.readline()\n with open(os.path.join(os.path.dirname(__file__),\n '../cells/cubor-base.dict.yaml'), 'a', encoding='utf8') as writer:\n writer.writelines(retlines)\n\n\n<mask token>\n",
"step-3": "<mask token>\ntoneMap = {'d': ['ā', 'ē', 'ī', 'ō', 'ū', 'ǜ'], 'f': ['á', 'é', 'í', 'ó',\n 'ú', 'ǘ'], 'j': ['ǎ', 'ě', 'ǐ', 'ǒ', 'ǔ', 'ǚ'], 'k': ['à', 'è', 'ì',\n 'ò', 'ù', 'ǜ']}\nweightMap = {}\n\n\ndef getWeightMap():\n with open(os.path.join(os.path.dirname(__file__),\n '../cells/cubor-base.dict.yaml'), 'r', encoding='utf8') as base:\n _lines = base.readlines()\n for wunit in _lines:\n units = wunit.split('\\t')\n if len(units) == 3 and len(units[0]) == 1:\n units[2] = units[2][:-1]\n if weightMap.get(units[0]) is None:\n weightMap.__setitem__(units[0], units[2])\n elif int(units[2]) > int(weightMap.get(units[0])):\n weightMap.__setitem__(units[0], units[2])\n\n\ndef flatten(a):\n for each in a:\n if not isinstance(each, list):\n yield each\n else:\n yield from flatten(each)\n\n\ndef getPinyins(c: List[List[str]]) ->list:\n return list(flatten(pinyin(c, heteronym=True)))\n\n\ndef getToneKeys(tone: str) ->str:\n for toneKey, toneList in toneMap.items():\n for toneChar in toneList:\n if tone.__contains__(toneChar):\n return toneKey\n return 'l'\n\n\ndef readSingleCharYaml():\n retlines = []\n with open(os.path.join(os.path.dirname(__file__),\n '../cubor/cubor-single.dict.yaml'), 'r', encoding='utf8') as yaml:\n line = yaml.readline()\n while len(line) is not 0:\n chars = line.split('\\t')\n if len(chars) is 3:\n word, srcPy = chars[0], chars[1]\n tones = getPinyins(pinyin(word, heteronym=True))\n tempToneKeys = []\n for tone in tones:\n toneKey = getToneKeys(tone)\n if toneKey not in tempToneKeys:\n tempToneKeys.append(toneKey)\n weight = weightMap.get(word) if weightMap.get(word\n ) is not None else '0'\n line = f'{word}\\t{srcPy}{toneKey}\\t{weight}\\r\\n'\n retlines.append(line)\n line = yaml.readline()\n with open(os.path.join(os.path.dirname(__file__),\n '../cells/cubor-base.dict.yaml'), 'a', encoding='utf8') as writer:\n writer.writelines(retlines)\n\n\nif __name__ == '__main__':\n print('[INFO] - 构建库珀输入法 声调表...')\n getWeightMap()\n 
readSingleCharYaml()\n print('[INFO] - 构建完成!')\n",
"step-4": "import os\nfrom typing import List\nfrom pypinyin import pinyin, lazy_pinyin\ntoneMap = {'d': ['ā', 'ē', 'ī', 'ō', 'ū', 'ǜ'], 'f': ['á', 'é', 'í', 'ó',\n 'ú', 'ǘ'], 'j': ['ǎ', 'ě', 'ǐ', 'ǒ', 'ǔ', 'ǚ'], 'k': ['à', 'è', 'ì',\n 'ò', 'ù', 'ǜ']}\nweightMap = {}\n\n\ndef getWeightMap():\n with open(os.path.join(os.path.dirname(__file__),\n '../cells/cubor-base.dict.yaml'), 'r', encoding='utf8') as base:\n _lines = base.readlines()\n for wunit in _lines:\n units = wunit.split('\\t')\n if len(units) == 3 and len(units[0]) == 1:\n units[2] = units[2][:-1]\n if weightMap.get(units[0]) is None:\n weightMap.__setitem__(units[0], units[2])\n elif int(units[2]) > int(weightMap.get(units[0])):\n weightMap.__setitem__(units[0], units[2])\n\n\ndef flatten(a):\n for each in a:\n if not isinstance(each, list):\n yield each\n else:\n yield from flatten(each)\n\n\ndef getPinyins(c: List[List[str]]) ->list:\n return list(flatten(pinyin(c, heteronym=True)))\n\n\ndef getToneKeys(tone: str) ->str:\n for toneKey, toneList in toneMap.items():\n for toneChar in toneList:\n if tone.__contains__(toneChar):\n return toneKey\n return 'l'\n\n\ndef readSingleCharYaml():\n retlines = []\n with open(os.path.join(os.path.dirname(__file__),\n '../cubor/cubor-single.dict.yaml'), 'r', encoding='utf8') as yaml:\n line = yaml.readline()\n while len(line) is not 0:\n chars = line.split('\\t')\n if len(chars) is 3:\n word, srcPy = chars[0], chars[1]\n tones = getPinyins(pinyin(word, heteronym=True))\n tempToneKeys = []\n for tone in tones:\n toneKey = getToneKeys(tone)\n if toneKey not in tempToneKeys:\n tempToneKeys.append(toneKey)\n weight = weightMap.get(word) if weightMap.get(word\n ) is not None else '0'\n line = f'{word}\\t{srcPy}{toneKey}\\t{weight}\\r\\n'\n retlines.append(line)\n line = yaml.readline()\n with open(os.path.join(os.path.dirname(__file__),\n '../cells/cubor-base.dict.yaml'), 'a', encoding='utf8') as writer:\n writer.writelines(retlines)\n\n\nif __name__ == '__main__':\n 
print('[INFO] - 构建库珀输入法 声调表...')\n getWeightMap()\n readSingleCharYaml()\n print('[INFO] - 构建完成!')\n",
"step-5": "import os\nfrom typing import List\n\nfrom pypinyin import pinyin, lazy_pinyin\n\n# map vowel-number combination to unicode\ntoneMap = {\n \"d\": ['ā', 'ē', 'ī', 'ō', 'ū', 'ǜ'],\n \"f\": ['á', 'é', 'í', 'ó', 'ú', 'ǘ'],\n \"j\": ['ǎ', 'ě', 'ǐ', 'ǒ', 'ǔ', 'ǚ'],\n \"k\": ['à', 'è', 'ì', 'ò', 'ù', 'ǜ'],\n}\n\nweightMap = {}\n\n\ndef getWeightMap():\n with open(os.path.join(os.path.dirname(__file__),\n \"../cells/cubor-base.dict.yaml\"), \"r\", encoding='utf8') as base:\n _lines = base.readlines()\n for wunit in _lines:\n units = wunit.split('\\t')\n if len(units) == 3 and len(units[0]) == 1: # 保证不是空行、且为单字,并一定能得到权重\n units[2] = units[2][:-1]\n if weightMap.get(units[0]) is None:\n weightMap.__setitem__(units[0], units[2])\n else: # 说明权重字典中已经有了该词\n if int(units[2]) > int(weightMap.get(units[0])): # 如果权重更大则更新,我们的声调码 权重要高!\n weightMap.__setitem__(units[0], units[2])\n\n\ndef flatten(a):\n for each in a:\n if not isinstance(each, list):\n yield each\n else:\n yield from flatten(each)\n\n\ndef getPinyins(c: List[List[str]]) -> list:\n return list(flatten(pinyin(c, heteronym=True)))\n\n\ndef getToneKeys(tone: str) -> str:\n for toneKey, toneList in toneMap.items():\n for toneChar in toneList:\n if tone.__contains__(toneChar):\n return toneKey\n return 'l'\n\n\ndef readSingleCharYaml():\n retlines = []\n with open(os.path.join(os.path.dirname(__file__),\n \"../cubor/cubor-single.dict.yaml\"), \"r\", encoding='utf8') as yaml:\n line = yaml.readline()\n\n # 准备追加音调\n while len(line) is not 0:\n chars = line.split('\\t')\n if len(chars) is 3:\n # 获得词语内容、本来的读音\n word, srcPy = chars[0], chars[1]\n tones = getPinyins(pinyin(word, heteronym=True))\n\n tempToneKeys = []\n for tone in tones:\n toneKey = getToneKeys(tone)\n if toneKey not in tempToneKeys: # 如果类似 啊 这样的字有多音 a e 都是一声就避免重复\n tempToneKeys.append(toneKey)\n\n # base-dict 来源于 pinyin_simp 如果按简体字表配置的权重Map 中找不到,则说明是权重很低的繁体字\n weight = weightMap.get(word) if weightMap.get(word) is not None else '0'\n\n line = 
f'{word}\\t{srcPy}{toneKey}\\t{weight}\\r\\n' # 部署时用\n # line = f'{word}\\t{toneKey}\\t{weight}\\r\\n' # 重构单字声调码表用\n retlines.append(line)\n\n # fullPinyin = lazy_pinyin(word)[0] if len(lazy_pinyin(word)) > 0 else ''\n # if len(fullPinyin) > 0 and fullPinyin != srcPy:\n # retlines.append(f'{word}\\t{fullPinyin}{toneKey}\\t{weight}\\r\\n') # 部署时用\n\n line = yaml.readline()\n\n with open(os.path.join(os.path.dirname(__file__),\n \"../cells/cubor-base.dict.yaml\"), \"a\", encoding='utf8') as writer: # 部署时用\n # \"../cubor/cubor-tones.dict.yaml\"), \"w\", encoding='utf8') as writer: # 重构单字码表时用\n writer.writelines(retlines)\n\n\nif __name__ == '__main__':\n print('[INFO] - 构建库珀输入法 声调表...')\n getWeightMap() # 先找到权重表\n readSingleCharYaml() # 再写入声调码表\n print('[INFO] - 构建完成!')\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
""" DB models.
"""
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from db.session import map_engine, replay_engine
# Two declarative bases bound to separate engines: map data and replay data
# are stored in different databases.
MapBase = declarative_base(bind=map_engine)
ReplayBase = declarative_base(bind=replay_engine)
class Map(MapBase):
    """A game map: name, dimensions, and its lines, points and posts."""

    __tablename__ = 'map'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    size_x = Column(Integer)
    size_y = Column(Integer)
    lines = relationship('Line', backref='map', lazy='dynamic')
    points = relationship('Point', backref='map', lazy='dynamic')
    posts = relationship('Post', backref='map', lazy='dynamic')

    def __repr__(self):
        return (f"<Map(id='{self.id}', name='{self.name}', "
                f"size_x='{self.size_x}', size_y='{self.size_y}')>")
class Line(MapBase):
    """A map line with endpoints ``p0``/``p1`` and a length ``len``."""

    __tablename__ = 'line'

    id = Column(Integer, primary_key=True)
    len = Column(Integer)
    p0 = Column(Integer)
    p1 = Column(Integer)
    map_id = Column(Integer, ForeignKey('map.id'))

    def __repr__(self):
        return (f"<Line(id='{self.id}', len='{self.len}', "
                f"p0='{self.p0}', p1='{self.p1}', map_id='{self.map_id}')>")
class Point(MapBase):
    """An (x, y) coordinate on a map; posts may be attached to it."""

    __tablename__ = 'point'

    id = Column(Integer, primary_key=True)
    map_id = Column(Integer, ForeignKey('map.id'))
    x = Column(Integer)
    y = Column(Integer)
    posts = relationship('Post', backref='point', lazy='dynamic')

    def __repr__(self):
        return f"<Point(id='{self.id}', map_id='{self.map_id}', x='{self.x}', y='{self.y}')>"
class Post(MapBase):
    """A post at a map point, with population/armor/product statistics."""

    __tablename__ = 'post'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    type = Column(Integer)
    population = Column(Integer)
    armor = Column(Integer)
    product = Column(Integer)
    replenishment = Column(Integer)
    map_id = Column(Integer, ForeignKey('map.id'))
    point_id = Column(Integer, ForeignKey('point.id'))

    def __repr__(self):
        return (f"<Post(id='{self.id}', name='{self.name}', type='{self.type}', "
                f"population='{self.population}', armor='{self.armor}', "
                f"product='{self.product}', replenishment='{self.replenishment}', "
                f"map_id='{self.map_id}', point_id='{self.point_id}')>")
class Game(ReplayBase):
    """A recorded game: metadata plus its ordered action log."""

    __tablename__ = 'game'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    date = Column(DateTime)
    map_name = Column(String)
    actions = relationship('Action', backref='game', lazy='dynamic')
    num_players = Column(Integer)

    def __repr__(self):
        return (f"<Game(id='{self.id}', name='{self.name}', date='{self.date}', "
                f"map_name='{self.map_name}', num_players='{self.num_players}')>")
class Action(ReplayBase):
    """A single timestamped action belonging to a recorded game."""

    __tablename__ = 'action'

    id = Column(Integer, primary_key=True)
    game_id = Column(Integer, ForeignKey('game.id'))
    code = Column(Integer)
    message = Column(String)
    date = Column(DateTime)

    def __repr__(self):
        return (f"<Action(id='{self.id}', game_id='{self.game_id}', "
                f"code='{self.code}', message='{self.message}', date='{self.date}')>")
|
normal
|
{
"blob_id": "6b3cb7a42c8bc665e35206b135f6aefea3439758",
"index": 7381,
"step-1": "<mask token>\n\n\nclass Line(MapBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return (\"<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>\".\n format(self.id, self.len, self.p0, self.p1, self.map_id))\n\n\nclass Point(MapBase):\n __tablename__ = 'point'\n id = Column(Integer, primary_key=True)\n map_id = Column(Integer, ForeignKey('map.id'))\n x = Column(Integer)\n y = Column(Integer)\n posts = relationship('Post', backref='point', lazy='dynamic')\n\n def __repr__(self):\n return \"<Point(id='{}', map_id='{}', x='{}', y='{}')>\".format(self.\n id, self.map_id, self.x, self.y)\n\n\nclass Post(MapBase):\n __tablename__ = 'post'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n type = Column(Integer)\n population = Column(Integer)\n armor = Column(Integer)\n product = Column(Integer)\n replenishment = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n point_id = Column(Integer, ForeignKey('point.id'))\n\n def __repr__(self):\n return (\n \"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', product='{}', replenishment='{}', map_id='{}', point_id='{}')>\"\n .format(self.id, self.name, self.type, self.population, self.\n armor, self.product, self.replenishment, self.map_id, self.\n point_id))\n\n\nclass Game(ReplayBase):\n __tablename__ = 'game'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n date = Column(DateTime)\n map_name = Column(String)\n actions = relationship('Action', backref='game', lazy='dynamic')\n num_players = Column(Integer)\n\n def __repr__(self):\n return (\n \"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>\"\n .format(self.id, self.name, self.date, self.map_name, self.\n num_players))\n\n\nclass Action(ReplayBase):\n __tablename__ = 'action'\n id = Column(Integer, primary_key=True)\n game_id = Column(Integer, ForeignKey('game.id'))\n code = Column(Integer)\n message = 
Column(String)\n date = Column(DateTime)\n\n def __repr__(self):\n return (\n \"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>\"\n .format(self.id, self.game_id, self.code, self.message, self.date))\n",
"step-2": "<mask token>\n\n\nclass Map(MapBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Line(MapBase):\n __tablename__ = 'line'\n id = Column(Integer, primary_key=True)\n len = Column(Integer)\n p0 = Column(Integer)\n p1 = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n\n def __repr__(self):\n return (\"<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>\".\n format(self.id, self.len, self.p0, self.p1, self.map_id))\n\n\nclass Point(MapBase):\n __tablename__ = 'point'\n id = Column(Integer, primary_key=True)\n map_id = Column(Integer, ForeignKey('map.id'))\n x = Column(Integer)\n y = Column(Integer)\n posts = relationship('Post', backref='point', lazy='dynamic')\n\n def __repr__(self):\n return \"<Point(id='{}', map_id='{}', x='{}', y='{}')>\".format(self.\n id, self.map_id, self.x, self.y)\n\n\nclass Post(MapBase):\n __tablename__ = 'post'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n type = Column(Integer)\n population = Column(Integer)\n armor = Column(Integer)\n product = Column(Integer)\n replenishment = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n point_id = Column(Integer, ForeignKey('point.id'))\n\n def __repr__(self):\n return (\n \"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', product='{}', replenishment='{}', map_id='{}', point_id='{}')>\"\n .format(self.id, self.name, self.type, self.population, self.\n armor, self.product, self.replenishment, self.map_id, self.\n point_id))\n\n\nclass Game(ReplayBase):\n __tablename__ = 'game'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n date = Column(DateTime)\n map_name = Column(String)\n actions = relationship('Action', backref='game', lazy='dynamic')\n num_players = Column(Integer)\n\n def __repr__(self):\n return (\n \"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>\"\n 
.format(self.id, self.name, self.date, self.map_name, self.\n num_players))\n\n\nclass Action(ReplayBase):\n __tablename__ = 'action'\n id = Column(Integer, primary_key=True)\n game_id = Column(Integer, ForeignKey('game.id'))\n code = Column(Integer)\n message = Column(String)\n date = Column(DateTime)\n\n def __repr__(self):\n return (\n \"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>\"\n .format(self.id, self.game_id, self.code, self.message, self.date))\n",
"step-3": "<mask token>\n\n\nclass Map(MapBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return \"<Map(id='{}', name='{}', size_x='{}', size_y='{}')>\".format(\n self.id, self.name, self.size_x, self.size_y)\n\n\nclass Line(MapBase):\n __tablename__ = 'line'\n id = Column(Integer, primary_key=True)\n len = Column(Integer)\n p0 = Column(Integer)\n p1 = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n\n def __repr__(self):\n return (\"<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>\".\n format(self.id, self.len, self.p0, self.p1, self.map_id))\n\n\nclass Point(MapBase):\n __tablename__ = 'point'\n id = Column(Integer, primary_key=True)\n map_id = Column(Integer, ForeignKey('map.id'))\n x = Column(Integer)\n y = Column(Integer)\n posts = relationship('Post', backref='point', lazy='dynamic')\n\n def __repr__(self):\n return \"<Point(id='{}', map_id='{}', x='{}', y='{}')>\".format(self.\n id, self.map_id, self.x, self.y)\n\n\nclass Post(MapBase):\n __tablename__ = 'post'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n type = Column(Integer)\n population = Column(Integer)\n armor = Column(Integer)\n product = Column(Integer)\n replenishment = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n point_id = Column(Integer, ForeignKey('point.id'))\n\n def __repr__(self):\n return (\n \"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', product='{}', replenishment='{}', map_id='{}', point_id='{}')>\"\n .format(self.id, self.name, self.type, self.population, self.\n armor, self.product, self.replenishment, self.map_id, self.\n point_id))\n\n\nclass Game(ReplayBase):\n __tablename__ = 'game'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n date = Column(DateTime)\n map_name = Column(String)\n actions = relationship('Action', backref='game', lazy='dynamic')\n num_players = 
Column(Integer)\n\n def __repr__(self):\n return (\n \"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>\"\n .format(self.id, self.name, self.date, self.map_name, self.\n num_players))\n\n\nclass Action(ReplayBase):\n __tablename__ = 'action'\n id = Column(Integer, primary_key=True)\n game_id = Column(Integer, ForeignKey('game.id'))\n code = Column(Integer)\n message = Column(String)\n date = Column(DateTime)\n\n def __repr__(self):\n return (\n \"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>\"\n .format(self.id, self.game_id, self.code, self.message, self.date))\n",
"step-4": "<mask token>\n\n\nclass Map(MapBase):\n __tablename__ = 'map'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n size_x = Column(Integer)\n size_y = Column(Integer)\n lines = relationship('Line', backref='map', lazy='dynamic')\n points = relationship('Point', backref='map', lazy='dynamic')\n posts = relationship('Post', backref='map', lazy='dynamic')\n\n def __repr__(self):\n return \"<Map(id='{}', name='{}', size_x='{}', size_y='{}')>\".format(\n self.id, self.name, self.size_x, self.size_y)\n\n\nclass Line(MapBase):\n __tablename__ = 'line'\n id = Column(Integer, primary_key=True)\n len = Column(Integer)\n p0 = Column(Integer)\n p1 = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n\n def __repr__(self):\n return (\"<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>\".\n format(self.id, self.len, self.p0, self.p1, self.map_id))\n\n\nclass Point(MapBase):\n __tablename__ = 'point'\n id = Column(Integer, primary_key=True)\n map_id = Column(Integer, ForeignKey('map.id'))\n x = Column(Integer)\n y = Column(Integer)\n posts = relationship('Post', backref='point', lazy='dynamic')\n\n def __repr__(self):\n return \"<Point(id='{}', map_id='{}', x='{}', y='{}')>\".format(self.\n id, self.map_id, self.x, self.y)\n\n\nclass Post(MapBase):\n __tablename__ = 'post'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n type = Column(Integer)\n population = Column(Integer)\n armor = Column(Integer)\n product = Column(Integer)\n replenishment = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n point_id = Column(Integer, ForeignKey('point.id'))\n\n def __repr__(self):\n return (\n \"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', product='{}', replenishment='{}', map_id='{}', point_id='{}')>\"\n .format(self.id, self.name, self.type, self.population, self.\n armor, self.product, self.replenishment, self.map_id, self.\n point_id))\n\n\nclass Game(ReplayBase):\n __tablename__ = 
'game'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n date = Column(DateTime)\n map_name = Column(String)\n actions = relationship('Action', backref='game', lazy='dynamic')\n num_players = Column(Integer)\n\n def __repr__(self):\n return (\n \"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>\"\n .format(self.id, self.name, self.date, self.map_name, self.\n num_players))\n\n\nclass Action(ReplayBase):\n __tablename__ = 'action'\n id = Column(Integer, primary_key=True)\n game_id = Column(Integer, ForeignKey('game.id'))\n code = Column(Integer)\n message = Column(String)\n date = Column(DateTime)\n\n def __repr__(self):\n return (\n \"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>\"\n .format(self.id, self.game_id, self.code, self.message, self.date))\n",
"step-5": "\"\"\" DB models.\n\"\"\"\nfrom sqlalchemy import Column, Integer, String, ForeignKey, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\n\nfrom db.session import map_engine, replay_engine\n\nMapBase = declarative_base(bind=map_engine)\nReplayBase = declarative_base(bind=replay_engine)\n\n\nclass Map(MapBase):\n\n __tablename__ = 'map'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n size_x = Column(Integer)\n size_y = Column(Integer)\n lines = relationship('Line', backref='map', lazy='dynamic')\n points = relationship('Point', backref='map', lazy='dynamic')\n posts = relationship('Post', backref='map', lazy='dynamic')\n\n def __repr__(self):\n return \"<Map(id='{}', name='{}', size_x='{}', size_y='{}')>\".format(\n self.id, self.name, self.size_x, self.size_y)\n\n\nclass Line(MapBase):\n\n __tablename__ = 'line'\n\n id = Column(Integer, primary_key=True)\n len = Column(Integer)\n p0 = Column(Integer)\n p1 = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n\n def __repr__(self):\n return \"<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>\".format(\n self.id, self.len, self.p0, self.p1, self.map_id)\n\n\nclass Point(MapBase):\n\n __tablename__ = 'point'\n\n id = Column(Integer, primary_key=True)\n map_id = Column(Integer, ForeignKey('map.id'))\n x = Column(Integer)\n y = Column(Integer)\n posts = relationship('Post', backref='point', lazy='dynamic')\n\n def __repr__(self):\n return \"<Point(id='{}', map_id='{}', x='{}', y='{}')>\".format(\n self.id, self.map_id, self.x, self.y)\n\n\nclass Post(MapBase):\n\n __tablename__ = 'post'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n type = Column(Integer)\n population = Column(Integer)\n armor = Column(Integer)\n product = Column(Integer)\n replenishment = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n point_id = Column(Integer, ForeignKey('point.id'))\n\n def 
__repr__(self):\n return (\n \"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', \"\n \"product='{}', replenishment='{}', map_id='{}', point_id='{}')>\".format(\n self.id, self.name, self.type, self.population, self.armor,\n self.product, self.replenishment, self.map_id, self.point_id\n )\n )\n\n\nclass Game(ReplayBase):\n\n __tablename__ = 'game'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n date = Column(DateTime)\n map_name = Column(String)\n actions = relationship('Action', backref='game', lazy='dynamic')\n num_players = Column(Integer)\n\n def __repr__(self):\n return \"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>\".format(\n self.id, self.name, self.date, self.map_name, self.num_players)\n\n\nclass Action(ReplayBase):\n\n __tablename__ = 'action'\n\n id = Column(Integer, primary_key=True)\n game_id = Column(Integer, ForeignKey('game.id'))\n code = Column(Integer)\n message = Column(String)\n date = Column(DateTime)\n\n def __repr__(self):\n return \"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>\".format(\n self.id, self.game_id, self.code, self.message, self.date)\n",
"step-ids": [
14,
16,
17,
18,
21
]
}
|
[
14,
16,
17,
18,
21
] |
class Solution(object):
    def checkSubarraySum(self, nums, k):
        """
        Return True if ``nums`` contains a contiguous subarray of length
        at least 2 whose sum is a multiple of ``k`` (for ``k == 0`` the
        subarray sum itself must be 0).

        nums: list of integers.
        k: integer divisor (may be 0 or negative).

        Runs in O(n) time / O(n) space using prefix sums: two prefixes
        that are equal mod ``k`` bracket a subarray whose sum is divisible
        by ``k``.  This also fixes the original ``k == 0`` handling, which
        only tested whether the TOTAL sum was 0 and so missed inputs such
        as [0, 0, 1].
        """
        # Earliest index at which each prefix value (mod k) was seen.
        # The virtual prefix before the array is 0 at index -1.
        first_seen = {0: -1}
        prefix = 0
        for i, num in enumerate(nums):
            prefix += num
            # For k == 0 use the raw prefix sum: equal prefixes bracket
            # a zero-sum subarray.
            key = prefix % k if k else prefix
            if key in first_seen:
                # Keep the earliest index so the bracketed subarray is as
                # long as possible; it must span at least 2 elements.
                if i - first_seen[key] >= 2:
                    return True
            else:
                first_seen[key] = i
        return False
|
normal
|
{
"blob_id": "033973ddc81a5fdf0e40009c4f321215fe3f4217",
"index": 6779,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def checkSubarraySum(self, nums, k):\n if not nums or len(nums) == 1:\n return False\n sum_array = [0] * (len(nums) + 1)\n for i, num in enumerate(nums):\n sum_array[i + 1] = sum_array[i] + num\n if k == 0:\n if sum_array[-1] == 0:\n return True\n else:\n return False\n for i in range(1, len(sum_array)):\n for j in range(i - 1):\n if not (sum_array[i] - sum_array[j]) % k:\n return True\n return False\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(args):
openstack.enable_logging(debug=False)
api = openstack.connect(cloud=args.cloud)
snapshot_id = args.snapshot
server = args.volume
try:
snapshot = Snapshot(api=api, snapshot=api.volume.get_snapshot(
snapshot_id))
except openstack.exceptions.ResourceNotFound:
print('Snapshot id {} not found.'.format(snapshot_id))
sys.exit(1)
today = time.strftime('%d-%m-%Y')
print('')
print('Converting snapshot to volume..')
volume = snapshot.to_volume('{}-restore-{}'.format(server, today))
print('Converting volume to image..')
image = volume.to_image('{}-restore-{}'.format(server, today))
print('Converting image to volume..')
image.to_volume(server, size=volume.volume.size)
image.delete()
volume.delete()
print('')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(args):
openstack.enable_logging(debug=False)
api = openstack.connect(cloud=args.cloud)
snapshot_id = args.snapshot
server = args.volume
try:
snapshot = Snapshot(api=api, snapshot=api.volume.get_snapshot(
snapshot_id))
except openstack.exceptions.ResourceNotFound:
print('Snapshot id {} not found.'.format(snapshot_id))
sys.exit(1)
today = time.strftime('%d-%m-%Y')
print('')
print('Converting snapshot to volume..')
volume = snapshot.to_volume('{}-restore-{}'.format(server, today))
print('Converting volume to image..')
image = volume.to_image('{}-restore-{}'.format(server, today))
print('Converting image to volume..')
image.to_volume(server, size=volume.volume.size)
image.delete()
volume.delete()
print('')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Restore snapshots')
parser.add_argument('--snapshot', required=True, help='', metavar=
'<snapshot_id>')
parser.add_argument('--volume', required=True, help='', metavar=
'<volume name>')
parser.add_argument('--cloud', help='', metavar=
'<cloud in clouds.yaml>', default='fuga')
args = parser.parse_args()
main(args)
<|reserved_special_token_1|>
import argparse
import openstack
import time
import sys
from sdk import Snapshot
def main(args):
openstack.enable_logging(debug=False)
api = openstack.connect(cloud=args.cloud)
snapshot_id = args.snapshot
server = args.volume
try:
snapshot = Snapshot(api=api, snapshot=api.volume.get_snapshot(
snapshot_id))
except openstack.exceptions.ResourceNotFound:
print('Snapshot id {} not found.'.format(snapshot_id))
sys.exit(1)
today = time.strftime('%d-%m-%Y')
print('')
print('Converting snapshot to volume..')
volume = snapshot.to_volume('{}-restore-{}'.format(server, today))
print('Converting volume to image..')
image = volume.to_image('{}-restore-{}'.format(server, today))
print('Converting image to volume..')
image.to_volume(server, size=volume.volume.size)
image.delete()
volume.delete()
print('')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Restore snapshots')
parser.add_argument('--snapshot', required=True, help='', metavar=
'<snapshot_id>')
parser.add_argument('--volume', required=True, help='', metavar=
'<volume name>')
parser.add_argument('--cloud', help='', metavar=
'<cloud in clouds.yaml>', default='fuga')
args = parser.parse_args()
main(args)
<|reserved_special_token_1|>
#!/usr/bin/env python
#
# This will take a snapshot and convert it into a volume. To create a volume
# without any links to the old snapshot you need to convert it to a temporary
# volume first, convert that into an image and convert the image back into
# your final volume. Once this is all done, the temporary volume and image
# will be removed.
#
import argparse
import openstack
import time
import sys
from sdk import Snapshot
def main(args):
    """Restore a volume from a snapshot by round-tripping through an image.

    args: argparse namespace with ``snapshot`` (snapshot id), ``volume``
    (target volume name) and ``cloud`` (entry name in clouds.yaml).

    Side effects: creates a temporary volume and a temporary image in the
    cloud, converts the image into the final volume, then deletes both
    intermediates.  Exits with status 1 if the snapshot does not exist.
    """
    # Set up the connection to OpenStack -- this is read from clouds.yaml
    openstack.enable_logging(debug=False)
    api = openstack.connect(cloud=args.cloud)
    snapshot_id = args.snapshot
    server = args.volume
    # Create a snapshot object
    try:
        snapshot = Snapshot(
            api=api,
            snapshot=api.volume.get_snapshot(snapshot_id),
        )
    except openstack.exceptions.ResourceNotFound:
        print('Snapshot id {} not found.'.format(snapshot_id))
        sys.exit(1)
    # Date suffix used to name the intermediate volume and image.
    today = time.strftime("%d-%m-%Y")
    # Convert the snapshot to a volume
    print('')
    print('Converting snapshot to volume..')
    volume = snapshot.to_volume('{}-restore-{}'.format(server, today))
    print('Converting volume to image..')
    image = volume.to_image('{}-restore-{}'.format(server, today))
    print('Converting image to volume..')
    image.to_volume(server, size=volume.volume.size)
    # Clean up the intermediates; only the restored volume remains.
    image.delete()
    volume.delete()
    print('')
if __name__ == '__main__':
    # Command-line entry point: parse arguments, then run the restore.
    parser = argparse.ArgumentParser(description='Restore snapshots')
    parser.add_argument(
        '--snapshot',
        required=True,
        help='',
        metavar=('<snapshot_id>'),
    )
    parser.add_argument(
        '--volume',
        required=True,
        help='',
        metavar=('<volume name>'),
    )
    # Optional: which clouds.yaml entry to use (defaults to 'fuga').
    parser.add_argument(
        '--cloud',
        help='',
        metavar=('<cloud in clouds.yaml>'),
        default='fuga',
    )
    args = parser.parse_args()
    main(args)
|
flexible
|
{
"blob_id": "aebe749a20482636d7ed508f9cbd9cde56656b73",
"index": 6236,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(args):\n openstack.enable_logging(debug=False)\n api = openstack.connect(cloud=args.cloud)\n snapshot_id = args.snapshot\n server = args.volume\n try:\n snapshot = Snapshot(api=api, snapshot=api.volume.get_snapshot(\n snapshot_id))\n except openstack.exceptions.ResourceNotFound:\n print('Snapshot id {} not found.'.format(snapshot_id))\n sys.exit(1)\n today = time.strftime('%d-%m-%Y')\n print('')\n print('Converting snapshot to volume..')\n volume = snapshot.to_volume('{}-restore-{}'.format(server, today))\n print('Converting volume to image..')\n image = volume.to_image('{}-restore-{}'.format(server, today))\n print('Converting image to volume..')\n image.to_volume(server, size=volume.volume.size)\n image.delete()\n volume.delete()\n print('')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(args):\n openstack.enable_logging(debug=False)\n api = openstack.connect(cloud=args.cloud)\n snapshot_id = args.snapshot\n server = args.volume\n try:\n snapshot = Snapshot(api=api, snapshot=api.volume.get_snapshot(\n snapshot_id))\n except openstack.exceptions.ResourceNotFound:\n print('Snapshot id {} not found.'.format(snapshot_id))\n sys.exit(1)\n today = time.strftime('%d-%m-%Y')\n print('')\n print('Converting snapshot to volume..')\n volume = snapshot.to_volume('{}-restore-{}'.format(server, today))\n print('Converting volume to image..')\n image = volume.to_image('{}-restore-{}'.format(server, today))\n print('Converting image to volume..')\n image.to_volume(server, size=volume.volume.size)\n image.delete()\n volume.delete()\n print('')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Restore snapshots')\n parser.add_argument('--snapshot', required=True, help='', metavar=\n '<snapshot_id>')\n parser.add_argument('--volume', required=True, help='', metavar=\n '<volume name>')\n parser.add_argument('--cloud', help='', metavar=\n '<cloud in clouds.yaml>', default='fuga')\n args = parser.parse_args()\n main(args)\n",
"step-4": "import argparse\nimport openstack\nimport time\nimport sys\nfrom sdk import Snapshot\n\n\ndef main(args):\n openstack.enable_logging(debug=False)\n api = openstack.connect(cloud=args.cloud)\n snapshot_id = args.snapshot\n server = args.volume\n try:\n snapshot = Snapshot(api=api, snapshot=api.volume.get_snapshot(\n snapshot_id))\n except openstack.exceptions.ResourceNotFound:\n print('Snapshot id {} not found.'.format(snapshot_id))\n sys.exit(1)\n today = time.strftime('%d-%m-%Y')\n print('')\n print('Converting snapshot to volume..')\n volume = snapshot.to_volume('{}-restore-{}'.format(server, today))\n print('Converting volume to image..')\n image = volume.to_image('{}-restore-{}'.format(server, today))\n print('Converting image to volume..')\n image.to_volume(server, size=volume.volume.size)\n image.delete()\n volume.delete()\n print('')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Restore snapshots')\n parser.add_argument('--snapshot', required=True, help='', metavar=\n '<snapshot_id>')\n parser.add_argument('--volume', required=True, help='', metavar=\n '<volume name>')\n parser.add_argument('--cloud', help='', metavar=\n '<cloud in clouds.yaml>', default='fuga')\n args = parser.parse_args()\n main(args)\n",
"step-5": "#!/usr/bin/env python\n#\n# This will take a snapshot and convert it into a volume. To create a volume\n# without any links to the old snapshot you need to convert it to a temporary\n# volume first, convert that into an image and convert the image back into\n# your final volume. Once this is all done, the temporary volume and image\n# will be removed.\n#\n\nimport argparse\nimport openstack\nimport time\nimport sys\nfrom sdk import Snapshot\n\n\ndef main(args):\n # Set up the connection to OpenStack -- this is read from clouds.yaml\n openstack.enable_logging(debug=False)\n api = openstack.connect(cloud=args.cloud)\n\n snapshot_id = args.snapshot\n server = args.volume\n\n # Create a snapshot object\n try:\n snapshot = Snapshot(\n api=api,\n snapshot=api.volume.get_snapshot(snapshot_id),\n )\n except openstack.exceptions.ResourceNotFound:\n print('Snapshot id {} not found.'.format(snapshot_id))\n sys.exit(1)\n\n today = time.strftime(\"%d-%m-%Y\")\n\n # Convert the snapshot to a volume\n print('')\n\n print('Converting snapshot to volume..')\n volume = snapshot.to_volume('{}-restore-{}'.format(server, today))\n\n print('Converting volume to image..')\n image = volume.to_image('{}-restore-{}'.format(server, today))\n\n print('Converting image to volume..')\n image.to_volume(server, size=volume.volume.size)\n\n image.delete()\n volume.delete()\n print('')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Restore snapshots')\n parser.add_argument(\n '--snapshot',\n required=True,\n help='',\n metavar=('<snapshot_id>'),\n )\n parser.add_argument(\n '--volume',\n required=True,\n help='',\n metavar=('<volume name>'),\n )\n parser.add_argument(\n '--cloud',\n help='',\n metavar=('<cloud in clouds.yaml>'),\n default='fuga',\n )\n\n args = parser.parse_args()\n main(args)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# -*- coding:utf-8 -*-
import importlib
def import_string(path):
    """Dynamically import the object named by a dotted *path*.

    :param path: dotted path string, e.g. 'src.engine.agent.AgentHandler'
    :return: the attribute named by the final path segment
    """
    module_path, attr_name = path.rsplit('.', maxsplit=1)
    target_module = importlib.import_module(module_path)
    return getattr(target_module, attr_name)
|
normal
|
{
"blob_id": "8502ebdb13c68a9a56a1a4ba51370d8458ca81dc",
"index": 7944,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef import_string(path):\n \"\"\"\n 根据字符串的形式去导入路径中的对象\n :param path: 'src.engine.agent.AgentHandler'\n :return:\n \"\"\"\n module_path, cls_name = path.rsplit('.', maxsplit=1)\n module = importlib.import_module(module_path)\n return getattr(module, cls_name)\n",
"step-3": "import importlib\n\n\ndef import_string(path):\n \"\"\"\n 根据字符串的形式去导入路径中的对象\n :param path: 'src.engine.agent.AgentHandler'\n :return:\n \"\"\"\n module_path, cls_name = path.rsplit('.', maxsplit=1)\n module = importlib.import_module(module_path)\n return getattr(module, cls_name)\n",
"step-4": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\nimport importlib\n\ndef import_string(path):\n \"\"\"\n 根据字符串的形式去导入路径中的对象\n :param path: 'src.engine.agent.AgentHandler'\n :return:\n \"\"\"\n\n module_path,cls_name = path.rsplit('.',maxsplit=1)\n module = importlib.import_module(module_path)\n return getattr(module,cls_name)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Dependencies
import pandas as pd

# Path to the input data file (columns: Date, Profit/Losses)
bank_data = "Resources/budget_data.csv"

# Read the budget data with pandas
bank_df = pd.read_csv(bank_data)

# Total number of months included in the dataset
total_months = bank_df["Date"].count()

# Total net amount of "Profit/Losses" over the entire period
net_end = bank_df["Profit/Losses"].sum()

# Month-over-month change in "Profit/Losses" (first row is NaN)
bank_df["Change"] = bank_df["Profit/Losses"].diff()

# Average change between months over the entire period
average_change = bank_df["Change"].mean()

# Greatest increase in profits (row holds both date and amount)
greatest_increase = bank_df["Change"].max()
greatest_increase_month = bank_df.loc[bank_df["Change"] == greatest_increase, :]

# Greatest decrease in profits (row holds both date and amount)
greatest_decrease = bank_df["Change"].min()
greatest_decrease_month = bank_df.loc[bank_df["Change"] == greatest_decrease, :]

# Build the report once so the console and the .txt file always agree
# (the original rounded the average when printing but wrote the raw
# value to the file).
report_lines = [
    "Financial Analysis",
    "----------------------------",
    f"Total Months: {total_months}",
    f"Total: {net_end}",
    f"Average Change: ${round(average_change)}",
    f"Greatest Increase in Profits:\n{greatest_increase_month}",
    f"Greatest Decrease in Profits:\n{greatest_decrease_month}",
]
financial_analysis = "\n".join(report_lines)
print(financial_analysis)

# Export to .txt; `with` guarantees the handle is flushed and closed
# (the original opened the file and never closed it).
with open("output.txt", "w") as output:
    output.write(financial_analysis + "\n")
|
normal
|
{
"blob_id": "1ad694c68ef264c6fbba4f4b9c069f22818d2816",
"index": 9973,
"step-1": "<mask token>\n",
"step-2": "<mask token>\noutput.write(\"\"\"{}\n{}\n{}\n{}\n{}\n{}\n{}\n\"\"\".format(line1, line2, line3, line4,\n line5, line6, line7))\n",
"step-3": "<mask token>\nbank_data = 'Resources/budget_data.csv'\nbank_df = pd.read_csv(bank_data)\ntotal_months = bank_df['Date'].count()\nnet_end = bank_df['Profit/Losses'].sum()\nbank_df['Change'] = bank_df['Profit/Losses'].diff()\naverage_change = bank_df['Change'].mean()\ngreatest_increase = bank_df['Change'].max()\ngreatest_increase_month = bank_df.loc[bank_df['Change'] == greatest_increase, :\n ]\ngreatest_decrease = bank_df['Change'].min()\ngreatest_decrease_month = bank_df.loc[bank_df['Change'] == greatest_decrease, :\n ]\nfinancial_analysis = print('Financial Analysis'), print(\n '----------------------------'), print(f'Total Months: {total_months}'\n ), print(f'Total: {net_end}'), print(\n f'Average Change: ${round(average_change)}'), print(\n f'Greatest Increase in Profits:'), print(str(greatest_increase_month)\n ), print(f'Greatest Decrease in Profits:'), print(greatest_decrease_month)\noutput = open('output.txt', 'w')\nline1 = 'Financial Analysis'\nline2 = '---------------------'\nline3 = str(f'Total Months: {total_months}')\nline4 = str(f'Total: {net_end}')\nline5 = str(f'Average Change: ${average_change}')\nline6 = str(f'Greatest Increase in Profits: {greatest_increase_month}')\nline7 = str(f'Greatest Decrease in Profits: {greatest_decrease_month}')\noutput.write(\"\"\"{}\n{}\n{}\n{}\n{}\n{}\n{}\n\"\"\".format(line1, line2, line3, line4,\n line5, line6, line7))\n",
"step-4": "import pandas as pd\nbank_data = 'Resources/budget_data.csv'\nbank_df = pd.read_csv(bank_data)\ntotal_months = bank_df['Date'].count()\nnet_end = bank_df['Profit/Losses'].sum()\nbank_df['Change'] = bank_df['Profit/Losses'].diff()\naverage_change = bank_df['Change'].mean()\ngreatest_increase = bank_df['Change'].max()\ngreatest_increase_month = bank_df.loc[bank_df['Change'] == greatest_increase, :\n ]\ngreatest_decrease = bank_df['Change'].min()\ngreatest_decrease_month = bank_df.loc[bank_df['Change'] == greatest_decrease, :\n ]\nfinancial_analysis = print('Financial Analysis'), print(\n '----------------------------'), print(f'Total Months: {total_months}'\n ), print(f'Total: {net_end}'), print(\n f'Average Change: ${round(average_change)}'), print(\n f'Greatest Increase in Profits:'), print(str(greatest_increase_month)\n ), print(f'Greatest Decrease in Profits:'), print(greatest_decrease_month)\noutput = open('output.txt', 'w')\nline1 = 'Financial Analysis'\nline2 = '---------------------'\nline3 = str(f'Total Months: {total_months}')\nline4 = str(f'Total: {net_end}')\nline5 = str(f'Average Change: ${average_change}')\nline6 = str(f'Greatest Increase in Profits: {greatest_increase_month}')\nline7 = str(f'Greatest Decrease in Profits: {greatest_decrease_month}')\noutput.write(\"\"\"{}\n{}\n{}\n{}\n{}\n{}\n{}\n\"\"\".format(line1, line2, line3, line4,\n line5, line6, line7))\n",
"step-5": "# Dependencies\nimport pandas as pd\n\n# Load in data file from resources\nbank_data = \"Resources/budget_data.csv\"\n\n# Read and display with pandas\nbank_df = pd.read_csv(bank_data)\n\n# Find the total number of months included in the dataset\ntotal_months = bank_df[\"Date\"].count()\n\n# Find the total net amount of \"Profit/Losses\" over the entire period\nnet_end = bank_df[\"Profit/Losses\"].sum()\n\n# Create a new column that displays profit or loss between months\nbank_df[\"Change\"] = bank_df[\"Profit/Losses\"].diff()\n\n# Find the average change in \"Profit/Losses\" between months over the entire period\naverage_change = bank_df[\"Change\"].mean()\n\n# Find the greatest increase in profits (date and amount) over the entire period\ngreatest_increase = bank_df[\"Change\"].max()\ngreatest_increase_month = bank_df.loc[bank_df[\"Change\"] == greatest_increase, :]\n\n# Find the greatest decrease in losses (date and amount) over the entire period\ngreatest_decrease = bank_df[\"Change\"].min()\ngreatest_decrease_month = bank_df.loc[bank_df[\"Change\"] == greatest_decrease, :]\n\n# Print financial analysis\nfinancial_analysis = (print(\"Financial Analysis\"), print(\"----------------------------\"), \nprint(f'Total Months: {total_months}'), print(f'Total: {net_end}'), \nprint(f'Average Change: ${round(average_change)}'), \nprint(f'Greatest Increase in Profits:'), \nprint(str(greatest_increase_month)),\nprint(f'Greatest Decrease in Profits:'), \nprint(greatest_decrease_month))\n\n# Export to .txt\noutput = open(\"output.txt\", \"w\")\n\nline1 = \"Financial Analysis\"\nline2 = \"---------------------\"\nline3 = str(f'Total Months: {total_months}')\nline4 = str(f'Total: {net_end}')\nline5 = str(f'Average Change: ${average_change}')\nline6 = str(f'Greatest Increase in Profits: {greatest_increase_month}')\nline7 = str(f'Greatest Decrease in Profits: 
{greatest_decrease_month}')\noutput.write('{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n'.format(line1,line2,line3,line4,line5,line6,line7))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import array
def swap(arr, first, second):
    """Exchange the elements at indices *first* and *second* in *arr*.

    Mutates *arr* in place and returns None.
    """
    tmp = arr[first]
    arr[first] = arr[second]
    arr[second] = tmp
def parent(i):
    """Return the index of the parent of node *i* in a 0-indexed binary heap.

    Bug fix: the original used true division (/), which yields a float on
    Python 3 and gives e.g. parent(2) == 0.5 instead of 0. Floor division
    keeps the result an int and rounds correctly for both children of a
    node: (i + 1) // 2 - 1 == (i - 1) // 2 for i >= 1.
    """
    return (i + 1) // 2 - 1
def left(i):
    """Return the index of the left child of node *i* in a 0-indexed heap."""
    # 2 * (i + 1) - 1 simplifies to 2 * i + 1.
    return 2 * i + 1
def right(i):
    """Return the index of the right child of node *i* in a 0-indexed heap."""
    # 2 * (i + 1) simplifies to 2 * i + 2.
    return 2 * i + 2
def max_heapify(heap, i):
    """Restore the max-heap property at index *i* (recursive variant).

    Assumes the subtrees rooted at left(i) and right(i) already satisfy the
    max-heap property; heap[i] itself may be smaller than its children and
    is floated down until the property holds.

    heap: an array treated as a binary max-heap (mutated in place)
    i: index whose subtree should be repaired
    """
    size = len(heap)
    l, r = left(i), right(i)
    largest = i
    if l < size and heap[l] > heap[largest]:
        largest = l
    if r < size and heap[r] > heap[largest]:
        largest = r
    if largest == i:
        return
    swap(heap, i, largest)
    max_heapify(heap, largest)
example_heap = array.array('i', [16, 4, 10, 14, 7, 9, 3, 2, 8, 1])
def build_max_heap(arr):
    """Rearrange *arr* in place into a max-heap, bottom-up.

    Indices >= len(arr) // 2 are leaves, so only the first half of the
    array needs sifting down, starting from the last internal node.
    """
    # Bug fix: len(arr) / 2 is a float on Python 3 and range() rejects
    # non-integer arguments; use floor division instead.
    for i in range(len(arr) // 2, 0, -1):
        max_heapify(arr, i - 1)
def max_heapify_unrecursive(heap, i):
    """Restore the max-heap property at index *i* (iterative variant).

    Same contract as max_heapify, but floats heap[i] down with a loop
    instead of recursion.

    heap: an array treated as a binary max-heap (mutated in place)
    i: index whose subtree should be repaired
    """
    node = i
    while True:
        l, r = left(node), right(node)
        biggest = node
        if l < len(heap) and heap[l] > heap[node]:
            biggest = l
        if r < len(heap) and heap[r] > heap[biggest]:
            biggest = r
        if biggest == node:
            return
        swap(heap, node, biggest)
        node = biggest
def heap_sort(arr):
    """Sort *arr* into a new ascending list, consuming *arr*.

    Bug fix: the previous version popped index 0 of the list, which shifts
    every remaining element left and destroys the parent/child index
    relationships, so the output was not actually sorted (e.g.
    [5, 3, 4, 1, 2] came back as [1, 3, 2, 4, 5]). Standard heapsort
    instead swaps the root with the last element, shrinks the heap by one,
    and sifts the new root down.

    Returns a new list in ascending order; *arr* is emptied as a side
    effect (matching the original draining behaviour).
    """
    def _sift_down(heap, root):
        # Float heap[root] down until the max-heap property holds.
        size = len(heap)
        while True:
            child = 2 * root + 1
            if child >= size:
                return
            if child + 1 < size and heap[child + 1] > heap[child]:
                child += 1
            if heap[child] <= heap[root]:
                return
            heap[root], heap[child] = heap[child], heap[root]
            root = child

    # Build the max-heap bottom-up over the internal nodes.
    for i in range(len(arr) // 2 - 1, -1, -1):
        _sift_down(arr, i)

    sorted_list = []
    while arr:
        arr[0], arr[-1] = arr[-1], arr[0]  # move current max to the tail
        sorted_list.append(arr.pop())      # O(1) removal from the tail
        _sift_down(arr, 0)                 # repair the shrunken heap
    sorted_list.reverse()
    return sorted_list
|
normal
|
{
"blob_id": "1262d41be3bf873d003464cb23998dde20fde318",
"index": 8115,
"step-1": "<mask token>\n\n\ndef swap(arr, first, second):\n \"\"\"\n Swaps the first index with the second.\n\n arr: an input array\n first: an index in the array\n second: an index in the array\n\n This function has the side effect mentioned above.\n \"\"\"\n arr[first], arr[second] = arr[second], arr[first]\n\n\ndef parent(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the parent of the given index.\n \"\"\"\n return (i + 1) / 2 - 1\n\n\ndef left(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the left-child of the given index.\n \"\"\"\n return 2 * (i + 1) - 1\n\n\n<mask token>\n\n\ndef build_max_heap(arr):\n for i in range(len(arr) / 2, 0, -1):\n max_heapify(arr, i - 1)\n\n\n<mask token>\n\n\ndef heap_sort(arr):\n build_max_heap(arr)\n sorted_list = []\n while arr:\n sorted_list.append(arr.pop(0))\n max_heapify(arr, 0)\n sorted_list.reverse()\n return sorted_list\n",
"step-2": "<mask token>\n\n\ndef swap(arr, first, second):\n \"\"\"\n Swaps the first index with the second.\n\n arr: an input array\n first: an index in the array\n second: an index in the array\n\n This function has the side effect mentioned above.\n \"\"\"\n arr[first], arr[second] = arr[second], arr[first]\n\n\ndef parent(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the parent of the given index.\n \"\"\"\n return (i + 1) / 2 - 1\n\n\ndef left(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the left-child of the given index.\n \"\"\"\n return 2 * (i + 1) - 1\n\n\n<mask token>\n\n\ndef max_heapify(heap, i):\n \"\"\"\n Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps\n but that A[i] may be smaller than its children. Max-heapify lets A[i] float\n down in order to satisfy the max-heap property.\n\n heap: an array that is being treated as a heap\n i: an index in the heap\n\n This method causes side effects in the heap given to it that bring the heap\n closer to a max-heap.\n \"\"\"\n left_child = left(i)\n right_child = right(i)\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n else:\n largest = i\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest != i:\n swap(heap, i, largest)\n max_heapify(heap, largest)\n\n\n<mask token>\n\n\ndef build_max_heap(arr):\n for i in range(len(arr) / 2, 0, -1):\n max_heapify(arr, i - 1)\n\n\ndef max_heapify_unrecursive(heap, i):\n \"\"\"\n Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps\n but that A[i] may be smaller than its children. 
Max-heapify lets A[i] float\n down in order to satisfy the max-heap property.\n\n heap: an array that is being treated as a heap\n i: an index in the heap\n\n This method causes side effects in the heap given to it that bring the heap\n closer to a max-heap.\n \"\"\"\n while True:\n left_child = left(i)\n right_child = right(i)\n largest = i\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest == i:\n return\n swap(heap, i, largest)\n i = largest\n\n\ndef heap_sort(arr):\n build_max_heap(arr)\n sorted_list = []\n while arr:\n sorted_list.append(arr.pop(0))\n max_heapify(arr, 0)\n sorted_list.reverse()\n return sorted_list\n",
"step-3": "<mask token>\n\n\ndef swap(arr, first, second):\n \"\"\"\n Swaps the first index with the second.\n\n arr: an input array\n first: an index in the array\n second: an index in the array\n\n This function has the side effect mentioned above.\n \"\"\"\n arr[first], arr[second] = arr[second], arr[first]\n\n\ndef parent(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the parent of the given index.\n \"\"\"\n return (i + 1) / 2 - 1\n\n\ndef left(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the left-child of the given index.\n \"\"\"\n return 2 * (i + 1) - 1\n\n\ndef right(i):\n \"\"\"\n i: an integer index in a heap\n\n Returns the index of the right-child of the given index.\n \"\"\"\n return 2 * (i + 1)\n\n\ndef max_heapify(heap, i):\n \"\"\"\n Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps\n but that A[i] may be smaller than its children. Max-heapify lets A[i] float\n down in order to satisfy the max-heap property.\n\n heap: an array that is being treated as a heap\n i: an index in the heap\n\n This method causes side effects in the heap given to it that bring the heap\n closer to a max-heap.\n \"\"\"\n left_child = left(i)\n right_child = right(i)\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n else:\n largest = i\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest != i:\n swap(heap, i, largest)\n max_heapify(heap, largest)\n\n\n<mask token>\n\n\ndef build_max_heap(arr):\n for i in range(len(arr) / 2, 0, -1):\n max_heapify(arr, i - 1)\n\n\ndef max_heapify_unrecursive(heap, i):\n \"\"\"\n Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps\n but that A[i] may be smaller than its children. 
Max-heapify lets A[i] float\n down in order to satisfy the max-heap property.\n\n heap: an array that is being treated as a heap\n i: an index in the heap\n\n This method causes side effects in the heap given to it that bring the heap\n closer to a max-heap.\n \"\"\"\n while True:\n left_child = left(i)\n right_child = right(i)\n largest = i\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest == i:\n return\n swap(heap, i, largest)\n i = largest\n\n\ndef heap_sort(arr):\n build_max_heap(arr)\n sorted_list = []\n while arr:\n sorted_list.append(arr.pop(0))\n max_heapify(arr, 0)\n sorted_list.reverse()\n return sorted_list\n",
"step-4": "import array\n\n\ndef swap(arr, first, second):\n \"\"\"\n Swaps the first index with the second.\n\n arr: an input array\n first: an index in the array\n second: an index in the array\n\n This function has the side effect mentioned above.\n \"\"\"\n arr[first], arr[second] = arr[second], arr[first]\n\n\ndef parent(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the parent of the given index.\n \"\"\"\n return (i + 1) / 2 - 1\n\n\ndef left(i):\n \"\"\"\n i: an integer index in a heap.\n\n Returns the index of the left-child of the given index.\n \"\"\"\n return 2 * (i + 1) - 1\n\n\ndef right(i):\n \"\"\"\n i: an integer index in a heap\n\n Returns the index of the right-child of the given index.\n \"\"\"\n return 2 * (i + 1)\n\n\ndef max_heapify(heap, i):\n \"\"\"\n Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps\n but that A[i] may be smaller than its children. Max-heapify lets A[i] float\n down in order to satisfy the max-heap property.\n\n heap: an array that is being treated as a heap\n i: an index in the heap\n\n This method causes side effects in the heap given to it that bring the heap\n closer to a max-heap.\n \"\"\"\n left_child = left(i)\n right_child = right(i)\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n else:\n largest = i\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest != i:\n swap(heap, i, largest)\n max_heapify(heap, largest)\n\n\nexample_heap = array.array('i', [16, 4, 10, 14, 7, 9, 3, 2, 8, 1])\n\n\ndef build_max_heap(arr):\n for i in range(len(arr) / 2, 0, -1):\n max_heapify(arr, i - 1)\n\n\ndef max_heapify_unrecursive(heap, i):\n \"\"\"\n Assumes that the binary tree rooted at Left(i) and Right(i) are max-heaps\n but that A[i] may be smaller than its children. 
Max-heapify lets A[i] float\n down in order to satisfy the max-heap property.\n\n heap: an array that is being treated as a heap\n i: an index in the heap\n\n This method causes side effects in the heap given to it that bring the heap\n closer to a max-heap.\n \"\"\"\n while True:\n left_child = left(i)\n right_child = right(i)\n largest = i\n if left_child < len(heap) and heap[left_child] > heap[i]:\n largest = left_child\n if right_child < len(heap) and heap[right_child] > heap[largest]:\n largest = right_child\n if largest == i:\n return\n swap(heap, i, largest)\n i = largest\n\n\ndef heap_sort(arr):\n build_max_heap(arr)\n sorted_list = []\n while arr:\n sorted_list.append(arr.pop(0))\n max_heapify(arr, 0)\n sorted_list.reverse()\n return sorted_list\n",
"step-5": null,
"step-ids": [
5,
7,
8,
10
]
}
|
[
5,
7,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.set_printoptions(precision=3)
<|reserved_special_token_0|>
sp.init_printing()
<|reserved_special_token_0|>
I.extend([(v ** 2 - 1) for v in vs])
print('Generating')
<|reserved_special_token_0|>
print('Done')
print('=== Generator Basis:')
for f in B.generator_basis:
display(f.as_expr())
print('=== Quotient Basis:')
for f in B.quotient_basis():
display(f.as_expr())
print('=== Variety:')
for v in B.zeros():
print(zip(R.symbols, v))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.set_printoptions(precision=3)
<|reserved_special_token_0|>
sp.init_printing()
R, x, y = sp.ring('x,y', sp.RR, order=sp.grevlex)
I = [x ** 2 + y ** 2 - 1.0, x + y]
R, x, y, z = sp.ring('x,y,z', sp.RR, order=sp.grevlex)
I = [x ** 2 - 1, y ** 2 - 4, z ** 2 - 9]
n = 4
Rvs = sp.ring(' '.join('v' + str(i) for i in range(1, n + 1)), sp.RR, order
=sp.grevlex)
R, vs = Rvs[0], Rvs[1:]
I = []
I.extend([(v ** 2 - 1) for v in vs])
print('Generating')
B = BB.BorderBasisFactory(1e-05).generate(R, I)
print('Done')
print('=== Generator Basis:')
for f in B.generator_basis:
display(f.as_expr())
print('=== Quotient Basis:')
for f in B.quotient_basis():
display(f.as_expr())
print('=== Variety:')
for v in B.zeros():
print(zip(R.symbols, v))
<|reserved_special_token_1|>
from __future__ import print_function
import sympy as sp
import numpy as np
import BorderBasis as BB
np.set_printoptions(precision=3)
from IPython.display import display, Markdown, Math
sp.init_printing()
R, x, y = sp.ring('x,y', sp.RR, order=sp.grevlex)
I = [x ** 2 + y ** 2 - 1.0, x + y]
R, x, y, z = sp.ring('x,y,z', sp.RR, order=sp.grevlex)
I = [x ** 2 - 1, y ** 2 - 4, z ** 2 - 9]
n = 4
Rvs = sp.ring(' '.join('v' + str(i) for i in range(1, n + 1)), sp.RR, order
=sp.grevlex)
R, vs = Rvs[0], Rvs[1:]
I = []
I.extend([(v ** 2 - 1) for v in vs])
print('Generating')
B = BB.BorderBasisFactory(1e-05).generate(R, I)
print('Done')
print('=== Generator Basis:')
for f in B.generator_basis:
display(f.as_expr())
print('=== Quotient Basis:')
for f in B.quotient_basis():
display(f.as_expr())
print('=== Variety:')
for v in B.zeros():
print(zip(R.symbols, v))
<|reserved_special_token_1|>
# Percy's playground.
from __future__ import print_function
import sympy as sp
import numpy as np
import BorderBasis as BB
np.set_printoptions(precision=3)
from IPython.display import display, Markdown, Math
sp.init_printing()
R, x, y = sp.ring('x,y', sp.RR, order=sp.grevlex)
I = [ x**2 + y**2 - 1.0, x + y ]
R, x, y, z = sp.ring('x,y,z', sp.RR, order=sp.grevlex)
I = [ x**2 - 1, y**2 - 4, z**2 - 9]
# n = 4 takes a long time
n = 4
Rvs = sp.ring(' '.join('v'+str(i) for i in range(1, n + 1)), sp.RR, order=sp.grevlex)
R, vs = Rvs[0], Rvs[1:]
I = []
I.extend([v**2 - 1 for v in vs])
#I.extend([(v-1)**2 for v in vs])
#I.extend([v-1 for v in vs])
#I.extend([vs[i] - vs[i-1] for i in range(1, len(vs))]) # Makes it fast
print('Generating')
B = BB.BorderBasisFactory(1e-5).generate(R,I)
print('Done')
print("=== Generator Basis:")
for f in B.generator_basis:
display(f.as_expr())
print("=== Quotient Basis:")
for f in B.quotient_basis():
display(f.as_expr())
# v2 is always zero
print("=== Variety:")
for v in B.zeros():
print(zip(R.symbols, v))
|
flexible
|
{
"blob_id": "88af8b4eeb40ecf19622ecde1a5dea9a078bb66c",
"index": 8817,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.set_printoptions(precision=3)\n<mask token>\nsp.init_printing()\n<mask token>\nI.extend([(v ** 2 - 1) for v in vs])\nprint('Generating')\n<mask token>\nprint('Done')\nprint('=== Generator Basis:')\nfor f in B.generator_basis:\n display(f.as_expr())\nprint('=== Quotient Basis:')\nfor f in B.quotient_basis():\n display(f.as_expr())\nprint('=== Variety:')\nfor v in B.zeros():\n print(zip(R.symbols, v))\n",
"step-3": "<mask token>\nnp.set_printoptions(precision=3)\n<mask token>\nsp.init_printing()\nR, x, y = sp.ring('x,y', sp.RR, order=sp.grevlex)\nI = [x ** 2 + y ** 2 - 1.0, x + y]\nR, x, y, z = sp.ring('x,y,z', sp.RR, order=sp.grevlex)\nI = [x ** 2 - 1, y ** 2 - 4, z ** 2 - 9]\nn = 4\nRvs = sp.ring(' '.join('v' + str(i) for i in range(1, n + 1)), sp.RR, order\n =sp.grevlex)\nR, vs = Rvs[0], Rvs[1:]\nI = []\nI.extend([(v ** 2 - 1) for v in vs])\nprint('Generating')\nB = BB.BorderBasisFactory(1e-05).generate(R, I)\nprint('Done')\nprint('=== Generator Basis:')\nfor f in B.generator_basis:\n display(f.as_expr())\nprint('=== Quotient Basis:')\nfor f in B.quotient_basis():\n display(f.as_expr())\nprint('=== Variety:')\nfor v in B.zeros():\n print(zip(R.symbols, v))\n",
"step-4": "from __future__ import print_function\nimport sympy as sp\nimport numpy as np\nimport BorderBasis as BB\nnp.set_printoptions(precision=3)\nfrom IPython.display import display, Markdown, Math\nsp.init_printing()\nR, x, y = sp.ring('x,y', sp.RR, order=sp.grevlex)\nI = [x ** 2 + y ** 2 - 1.0, x + y]\nR, x, y, z = sp.ring('x,y,z', sp.RR, order=sp.grevlex)\nI = [x ** 2 - 1, y ** 2 - 4, z ** 2 - 9]\nn = 4\nRvs = sp.ring(' '.join('v' + str(i) for i in range(1, n + 1)), sp.RR, order\n =sp.grevlex)\nR, vs = Rvs[0], Rvs[1:]\nI = []\nI.extend([(v ** 2 - 1) for v in vs])\nprint('Generating')\nB = BB.BorderBasisFactory(1e-05).generate(R, I)\nprint('Done')\nprint('=== Generator Basis:')\nfor f in B.generator_basis:\n display(f.as_expr())\nprint('=== Quotient Basis:')\nfor f in B.quotient_basis():\n display(f.as_expr())\nprint('=== Variety:')\nfor v in B.zeros():\n print(zip(R.symbols, v))\n",
"step-5": "# Percy's playground.\n\nfrom __future__ import print_function\nimport sympy as sp\nimport numpy as np\nimport BorderBasis as BB\nnp.set_printoptions(precision=3)\nfrom IPython.display import display, Markdown, Math\nsp.init_printing()\n\nR, x, y = sp.ring('x,y', sp.RR, order=sp.grevlex)\nI = [ x**2 + y**2 - 1.0, x + y ]\n\nR, x, y, z = sp.ring('x,y,z', sp.RR, order=sp.grevlex)\nI = [ x**2 - 1, y**2 - 4, z**2 - 9]\n\n# n = 4 takes a long time\nn = 4\nRvs = sp.ring(' '.join('v'+str(i) for i in range(1, n + 1)), sp.RR, order=sp.grevlex)\nR, vs = Rvs[0], Rvs[1:]\nI = []\nI.extend([v**2 - 1 for v in vs])\n#I.extend([(v-1)**2 for v in vs])\n#I.extend([v-1 for v in vs])\n#I.extend([vs[i] - vs[i-1] for i in range(1, len(vs))]) # Makes it fast\n\nprint('Generating')\nB = BB.BorderBasisFactory(1e-5).generate(R,I)\nprint('Done')\n\nprint(\"=== Generator Basis:\")\nfor f in B.generator_basis:\n display(f.as_expr())\n\nprint(\"=== Quotient Basis:\")\nfor f in B.quotient_basis():\n display(f.as_expr())\n\n# v2 is always zero\nprint(\"=== Variety:\")\nfor v in B.zeros():\n print(zip(R.symbols, v))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
obj = NistschemaSvIvListNmtokensMinLength5(value=['f', 'D', 'T', 'a', 'b',
'C', 'o', 't', 't', 'w'])
<|reserved_special_token_1|>
from output.models.nist_data.list_pkg.nmtokens.schema_instance.nistschema_sv_iv_list_nmtokens_min_length_5_xsd.nistschema_sv_iv_list_nmtokens_min_length_5 import NistschemaSvIvListNmtokensMinLength5
obj = NistschemaSvIvListNmtokensMinLength5(value=['f', 'D', 'T', 'a', 'b',
'C', 'o', 't', 't', 'w'])
<|reserved_special_token_1|>
from output.models.nist_data.list_pkg.nmtokens.schema_instance.nistschema_sv_iv_list_nmtokens_min_length_5_xsd.nistschema_sv_iv_list_nmtokens_min_length_5 import NistschemaSvIvListNmtokensMinLength5
obj = NistschemaSvIvListNmtokensMinLength5(
value=[
"f",
"D",
"T",
"a",
"b",
"C",
"o",
"t",
"t",
"w",
]
)
|
flexible
|
{
"blob_id": "3941f283893c259033d7fb3be83c8071433064ba",
"index": 7170,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nobj = NistschemaSvIvListNmtokensMinLength5(value=['f', 'D', 'T', 'a', 'b',\n 'C', 'o', 't', 't', 'w'])\n",
"step-3": "from output.models.nist_data.list_pkg.nmtokens.schema_instance.nistschema_sv_iv_list_nmtokens_min_length_5_xsd.nistschema_sv_iv_list_nmtokens_min_length_5 import NistschemaSvIvListNmtokensMinLength5\nobj = NistschemaSvIvListNmtokensMinLength5(value=['f', 'D', 'T', 'a', 'b',\n 'C', 'o', 't', 't', 'w'])\n",
"step-4": "from output.models.nist_data.list_pkg.nmtokens.schema_instance.nistschema_sv_iv_list_nmtokens_min_length_5_xsd.nistschema_sv_iv_list_nmtokens_min_length_5 import NistschemaSvIvListNmtokensMinLength5\n\n\nobj = NistschemaSvIvListNmtokensMinLength5(\n value=[\n \"f\",\n \"D\",\n \"T\",\n \"a\",\n \"b\",\n \"C\",\n \"o\",\n \"t\",\n \"t\",\n \"w\",\n ]\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# coding: utf-8
# Time complexity: O()
# Space complexity: O()
import math
# Greatest common divisor via Euclid's algorithm.
def get_gcd(a, b):
    """Return gcd(a, b), printing each intermediate (a, b) pair."""
    while b != 0:
        print(a, b)
        a, b = b, a % b
    return a
# Demo call: prints the intermediate (a, b) pairs for gcd(48, 30).
get_gcd(48, 30)
# Count the divisors of num by full trial division -- O(n) time.
def divisor1(num):
    """Return how many integers in [1, num] divide num evenly."""
    return sum(1 for candidate in range(1, num + 1) if num % candidate == 0)
# Count primes strictly below n with a sieve of Eratosthenes.
class Solution:
    def countPrimes(self, n: int) -> int:
        """Return the number of primes p with p < n."""
        if n < 3:
            return 0
        is_prime = [True] * n
        is_prime[0] = is_prime[1] = False
        # Sieving up to sqrt(n) suffices: any composite < n has a factor
        # no larger than sqrt(n).
        for p in range(int(n ** 0.5) + 1):
            if not is_prime[p]:
                continue
            for multiple in range(p + p, n, p):
                is_prime[multiple] = False
        return sum(is_prime)
# Use upper limit of (n**0.5)+1, because:
# (a) the smallest factor of a non-prime number will not be > sqrt(n).
# Ex. non-prime = 100,
# 5*20
# 10*10,
# 20*5 # !! we have seen 5 before.
# Primality test using the 6k +/- 1 optimisation, checking up to sqrt(n):
# every prime > 3 is congruent to 1 or 5 modulo 6.
def find_primer(n):
    """Return True when n is prime, False otherwise."""
    if n <= 3:
        return n > 1
    if n % 6 not in (1, 5):
        return False
    limit = int(n ** 0.5) + 1
    for k in range(5, limit, 6):
        if n % k == 0 or n % (k + 2) == 0:
            return False
    return True
# Count the divisors of num in O(sqrt(n)) time by pairing d with num // d.
def divisor2(num):
    """Return the number of divisors of num, printing each divisor pair."""
    root = int(num ** 0.5)
    count = 0
    for d in range(1, root + 1):
        if num % d:
            continue
        count += 2
        print(d, num // d)
    # A perfect square counted its root twice; drop the duplicate.
    return count - (root * root == num)
# Power-of-four test.
class Solution:
    def isPowerOfFour(self, n: int) -> bool:
        """Return True when n == 4**k for some integer k >= 0.

        A power of four is a power of two (single set bit) that is also
        congruent to 1 modulo 3; powers of two that are not powers of four
        are congruent to 2 modulo 3.
        """
        # Bug fix: the previous 0xAAAAAAAA mask only covered 32 bits, so
        # it wrongly accepted values such as 2**33 (Python ints are
        # unbounded). The mod-3 test is size-independent.
        return n > 0 and n & (n - 1) == 0 and (n - 1) % 3 == 0
# Power-of-two test via the single-set-bit trick.
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        """Return True when n == 2**k for some integer k >= 0."""
        if n <= 0:
            return False
        # A power of two has exactly one set bit, so n & (n - 1) clears it.
        return not (n & (n - 1))
# Power-of-two test, math-module flavour.
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        """Return True when n == 2**k for some integer k >= 0."""
        if n <= 0:
            return False
        # Bug fix: the old (log10(n) / log10(2)) % 1 == 0 check relied on
        # the float quotient being exactly integral, which rounding can
        # break for large inputs. Round the log and confirm with exact
        # integer arithmetic instead.
        return 2 ** round(math.log2(n)) == n
# Integer division without using the division operator.
class Solution:
    def divide(self, dividend: int, divisor: int) -> int:
        """Return dividend / divisor truncated toward zero, clamped to int32.

        a / b = c: repeatedly subtract the largest available multiple of
        the divisor. If a >= b << k then the quotient gains 1 << k and a
        loses b << k, so the loop needs only O(log^2) steps instead of the
        O(c) steps plain repeated subtraction would take.
        """
        # Bug fix: removed a leftover debug print(a, res) from the inner
        # loop, which polluted stdout on every call.
        sig = (dividend < 0) == (divisor < 0)
        a, b, res = abs(dividend), abs(divisor), 0
        while a >= b:
            shift = 0
            # Find the largest k with b << (k + 1) <= a.
            while a >= b << (shift + 1):
                shift += 1
            res += 1 << shift
            a -= b << shift
        # Clamp to the 32-bit signed range (only overflow case is
        # -2**31 / -1, which would yield 2**31).
        return min(res if sig else -res, (1 << 31) - 1)
# Fast exponentiation by squaring.
class Solution:
    def myPow(self, x: float, n: int) -> float:
        """Return x raised to the integer power n (n may be negative)."""
        if n == 0:
            return 1
        if n < 0:
            # Invert the base once, then treat the exponent as positive.
            return self.myPow(1 / x, -n)
        half = self.myPow(x * x, n >> 1)
        return half if n & 1 == 0 else x * half
# Integer square root via binary search.
class Solution:
    def mySqrt(self, x: int) -> int:
        """Return floor(sqrt(x)) for x >= 0."""
        lo, hi = 1, x
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            square = mid * mid
            if square == x:
                return mid
            if square > x:
                hi = mid - 1
            else:
                lo = mid + 1
        # hi ends just below the first value whose square exceeds x.
        return hi
# Approximate the n-th root of x by bisection (half-width tolerance 1e-3).
def root(x, n):
    """Return an approximation of x ** (1 / n) for non-negative x."""
    if x == 0:
        return 0
    lo, hi = 0, max(1, x)
    guess = (lo + hi) / 2.0
    # guess - lo is half the bracket width; stop once it is under 1e-3.
    while guess - lo >= 0.001:
        power = guess ** n
        if power > x:
            hi = guess
        elif power < x:
            lo = guess
        else:
            break
        guess = (lo + hi) / 2.0
    return guess
|
normal
|
{
"blob_id": "32066db8b43bc70c564cce5a33f50921285b3627",
"index": 6477,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\nclass Solution:\n\n def isPowerOfTwo(self, n: int) ->bool:\n return n > 0 and n & n - 1 == 0\n\n\nclass Solution:\n\n def isPowerOfTwo(self, n: int) ->bool:\n return n > 0 and math.log10(n) / math.log10(2) % 1 == 0\n\n\nclass Solution:\n\n def divide(self, dividend: int, divisor: int) ->int:\n \"\"\"\n a / b = c\n \n keep subtracting b, a faster way is to -2*b, -4*b, -1024*b\n\n if a > 2 * b => c should be bigger than 2 (1<<1)\n if a > 4 * b => c should be bigger than 4 (1<<2)\n if a > 1024 * b => c should be bigger than 1024 (1<<10)\n\n a might == 1024*b + 4*b + 2*b\n c = (1024+4+2)\n\n 2 * b == b << 1\n 1024 * b == b << 10\n \n \"\"\"\n sig = (dividend < 0) == (divisor < 0)\n a, b, res = abs(dividend), abs(divisor), 0\n while a >= b:\n shift = 0\n while a >= b << shift + 1:\n print(a, res)\n shift += 1\n res += 1 << shift\n a -= b << shift\n return min(res if sig else -res, (1 << 31) - 1)\n\n\nclass Solution:\n\n def myPow(self, x: float, n: int) ->float:\n if n == 0:\n return 1\n if n < 0:\n n = -n\n x = 1 / x\n if n & 1 == 0:\n return self.myPow(x * x, n >> 1)\n else:\n return x * self.myPow(x * x, n >> 1)\n\n\nclass Solution:\n\n def mySqrt(self, x: int) ->int:\n l = 1\n r = x\n while l <= r:\n mid = (l + r) // 2\n if mid * mid == x:\n return mid\n elif mid * mid > x:\n r = mid - 1\n else:\n l = mid + 1\n return r\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_gcd(a, b):\n if b == 0:\n return a\n print(a, b)\n return get_gcd(b, a % b)\n\n\n<mask token>\n\n\ndef divisor1(num):\n count = 0\n for i in range(1, num + 1):\n if num % i == 0:\n count += 1\n return count\n\n\nclass Solution:\n\n def countPrimes(self, n: int) ->int:\n if n < 3:\n return 0\n primes = [True] * n\n primes[0] = primes[1] = False\n for i in range(int(n ** 0.5) + 1):\n if primes[i]:\n for j in range(i + i, n, i):\n primes[j] = False\n return sum(primes)\n\n\ndef find_primer(n):\n if n <= 3:\n return n > 1\n if n % 6 != 1 and n % 6 != 5:\n return False\n for i in range(5, int(n ** 0.5) + 1, 6):\n if n % i == 0 or n % (i + 2) == 0:\n return False\n return True\n\n\n<mask token>\n\n\nclass Solution:\n\n def isPowerOfFour(self, n: int) ->bool:\n if n <= 0:\n return False\n return n & n - 1 == 0 and n & 2863311530 == 0\n\n\nclass Solution:\n\n def isPowerOfTwo(self, n: int) ->bool:\n return n > 0 and n & n - 1 == 0\n\n\nclass Solution:\n\n def isPowerOfTwo(self, n: int) ->bool:\n return n > 0 and math.log10(n) / math.log10(2) % 1 == 0\n\n\nclass Solution:\n\n def divide(self, dividend: int, divisor: int) ->int:\n \"\"\"\n a / b = c\n \n keep subtracting b, a faster way is to -2*b, -4*b, -1024*b\n\n if a > 2 * b => c should be bigger than 2 (1<<1)\n if a > 4 * b => c should be bigger than 4 (1<<2)\n if a > 1024 * b => c should be bigger than 1024 (1<<10)\n\n a might == 1024*b + 4*b + 2*b\n c = (1024+4+2)\n\n 2 * b == b << 1\n 1024 * b == b << 10\n \n \"\"\"\n sig = (dividend < 0) == (divisor < 0)\n a, b, res = abs(dividend), abs(divisor), 0\n while a >= b:\n shift = 0\n while a >= b << shift + 1:\n print(a, res)\n shift += 1\n res += 1 << shift\n a -= b << shift\n return min(res if sig else -res, (1 << 31) - 1)\n\n\nclass Solution:\n\n def myPow(self, x: float, n: int) ->float:\n if n == 0:\n return 1\n if n < 0:\n n = -n\n x = 1 / x\n if n & 1 == 0:\n return self.myPow(x * x, n >> 1)\n else:\n return x * self.myPow(x * 
x, n >> 1)\n\n\nclass Solution:\n\n def mySqrt(self, x: int) ->int:\n l = 1\n r = x\n while l <= r:\n mid = (l + r) // 2\n if mid * mid == x:\n return mid\n elif mid * mid > x:\n r = mid - 1\n else:\n l = mid + 1\n return r\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_gcd(a, b):\n if b == 0:\n return a\n print(a, b)\n return get_gcd(b, a % b)\n\n\n<mask token>\n\n\ndef divisor1(num):\n count = 0\n for i in range(1, num + 1):\n if num % i == 0:\n count += 1\n return count\n\n\nclass Solution:\n\n def countPrimes(self, n: int) ->int:\n if n < 3:\n return 0\n primes = [True] * n\n primes[0] = primes[1] = False\n for i in range(int(n ** 0.5) + 1):\n if primes[i]:\n for j in range(i + i, n, i):\n primes[j] = False\n return sum(primes)\n\n\ndef find_primer(n):\n if n <= 3:\n return n > 1\n if n % 6 != 1 and n % 6 != 5:\n return False\n for i in range(5, int(n ** 0.5) + 1, 6):\n if n % i == 0 or n % (i + 2) == 0:\n return False\n return True\n\n\ndef divisor2(num):\n count = 0\n sqrt = int(num ** 0.5)\n for x in range(1, sqrt + 1):\n if num % x == 0:\n count += 2\n print(x, num // x)\n return count - (sqrt ** 2 == num)\n\n\nclass Solution:\n\n def isPowerOfFour(self, n: int) ->bool:\n if n <= 0:\n return False\n return n & n - 1 == 0 and n & 2863311530 == 0\n\n\nclass Solution:\n\n def isPowerOfTwo(self, n: int) ->bool:\n return n > 0 and n & n - 1 == 0\n\n\nclass Solution:\n\n def isPowerOfTwo(self, n: int) ->bool:\n return n > 0 and math.log10(n) / math.log10(2) % 1 == 0\n\n\nclass Solution:\n\n def divide(self, dividend: int, divisor: int) ->int:\n \"\"\"\n a / b = c\n \n keep subtracting b, a faster way is to -2*b, -4*b, -1024*b\n\n if a > 2 * b => c should be bigger than 2 (1<<1)\n if a > 4 * b => c should be bigger than 4 (1<<2)\n if a > 1024 * b => c should be bigger than 1024 (1<<10)\n\n a might == 1024*b + 4*b + 2*b\n c = (1024+4+2)\n\n 2 * b == b << 1\n 1024 * b == b << 10\n \n \"\"\"\n sig = (dividend < 0) == (divisor < 0)\n a, b, res = abs(dividend), abs(divisor), 0\n while a >= b:\n shift = 0\n while a >= b << shift + 1:\n print(a, res)\n shift += 1\n res += 1 << shift\n a -= b << shift\n return min(res if sig else -res, (1 << 31) - 1)\n\n\nclass Solution:\n\n def myPow(self, x: float, 
n: int) ->float:\n if n == 0:\n return 1\n if n < 0:\n n = -n\n x = 1 / x\n if n & 1 == 0:\n return self.myPow(x * x, n >> 1)\n else:\n return x * self.myPow(x * x, n >> 1)\n\n\nclass Solution:\n\n def mySqrt(self, x: int) ->int:\n l = 1\n r = x\n while l <= r:\n mid = (l + r) // 2\n if mid * mid == x:\n return mid\n elif mid * mid > x:\n r = mid - 1\n else:\n l = mid + 1\n return r\n\n\ndef root(x, n):\n if x == 0:\n return 0\n low = 0\n hi = max(1, x)\n root = (low + hi) / 2.0\n while root - low >= 0.001:\n if root ** n > x:\n hi = root\n elif root ** n < x:\n low = root\n else:\n break\n root = (low + hi) / 2.0\n return root\n",
"step-4": "import math\n\n\ndef get_gcd(a, b):\n if b == 0:\n return a\n print(a, b)\n return get_gcd(b, a % b)\n\n\nget_gcd(48, 30)\n\n\ndef divisor1(num):\n count = 0\n for i in range(1, num + 1):\n if num % i == 0:\n count += 1\n return count\n\n\nclass Solution:\n\n def countPrimes(self, n: int) ->int:\n if n < 3:\n return 0\n primes = [True] * n\n primes[0] = primes[1] = False\n for i in range(int(n ** 0.5) + 1):\n if primes[i]:\n for j in range(i + i, n, i):\n primes[j] = False\n return sum(primes)\n\n\ndef find_primer(n):\n if n <= 3:\n return n > 1\n if n % 6 != 1 and n % 6 != 5:\n return False\n for i in range(5, int(n ** 0.5) + 1, 6):\n if n % i == 0 or n % (i + 2) == 0:\n return False\n return True\n\n\ndef divisor2(num):\n count = 0\n sqrt = int(num ** 0.5)\n for x in range(1, sqrt + 1):\n if num % x == 0:\n count += 2\n print(x, num // x)\n return count - (sqrt ** 2 == num)\n\n\nclass Solution:\n\n def isPowerOfFour(self, n: int) ->bool:\n if n <= 0:\n return False\n return n & n - 1 == 0 and n & 2863311530 == 0\n\n\nclass Solution:\n\n def isPowerOfTwo(self, n: int) ->bool:\n return n > 0 and n & n - 1 == 0\n\n\nclass Solution:\n\n def isPowerOfTwo(self, n: int) ->bool:\n return n > 0 and math.log10(n) / math.log10(2) % 1 == 0\n\n\nclass Solution:\n\n def divide(self, dividend: int, divisor: int) ->int:\n \"\"\"\n a / b = c\n \n keep subtracting b, a faster way is to -2*b, -4*b, -1024*b\n\n if a > 2 * b => c should be bigger than 2 (1<<1)\n if a > 4 * b => c should be bigger than 4 (1<<2)\n if a > 1024 * b => c should be bigger than 1024 (1<<10)\n\n a might == 1024*b + 4*b + 2*b\n c = (1024+4+2)\n\n 2 * b == b << 1\n 1024 * b == b << 10\n \n \"\"\"\n sig = (dividend < 0) == (divisor < 0)\n a, b, res = abs(dividend), abs(divisor), 0\n while a >= b:\n shift = 0\n while a >= b << shift + 1:\n print(a, res)\n shift += 1\n res += 1 << shift\n a -= b << shift\n return min(res if sig else -res, (1 << 31) - 1)\n\n\nclass Solution:\n\n def myPow(self, x: 
float, n: int) ->float:\n if n == 0:\n return 1\n if n < 0:\n n = -n\n x = 1 / x\n if n & 1 == 0:\n return self.myPow(x * x, n >> 1)\n else:\n return x * self.myPow(x * x, n >> 1)\n\n\nclass Solution:\n\n def mySqrt(self, x: int) ->int:\n l = 1\n r = x\n while l <= r:\n mid = (l + r) // 2\n if mid * mid == x:\n return mid\n elif mid * mid > x:\n r = mid - 1\n else:\n l = mid + 1\n return r\n\n\ndef root(x, n):\n if x == 0:\n return 0\n low = 0\n hi = max(1, x)\n root = (low + hi) / 2.0\n while root - low >= 0.001:\n if root ** n > x:\n hi = root\n elif root ** n < x:\n low = root\n else:\n break\n root = (low + hi) / 2.0\n return root\n",
"step-5": "#!/usr/bin/env python3\n# coding: utf-8\n\n# Time complexity: O()\n# Space complexity: O()\n\nimport math\n\n# 最大公约数 Greatest common divisor\ndef get_gcd(a, b):\n if b == 0:\n return a\n print(a, b)\n return get_gcd(b, a % b)\n\nget_gcd(48, 30)\n\n# 计算约数个数\n# 时间复杂度 O(n)\ndef divisor1(num):\n count = 0\n for i in range(1, num + 1):\n if num % i == 0:\n count += 1\n\n return count\n\n\n# count prime, \nclass Solution:\n def countPrimes(self, n: int) -> int:\n if n < 3:\n return 0\n primes = [True] * n\n primes[0] = primes[1] = False\n for i in range(int(n ** 0.5) + 1):\n if primes[i]:\n for j in range(i + i, n, i): # delete all its multiples\n primes[j] = False\n return sum(primes)\n\n\n# Use upper limit of (n**0.5)+1, because:\n# (a) the smallest factor of a non-prime number will not be > sqrt(n).\n# Ex. non-prime = 100,\n# 5*20\n# 10*10,\n# 20*5 # !! we have seen 5 before.\n\n\n\n# 判断prime,因为所有prime都是6n+1或者6n-1,同时我们只需要计算到sqrt(n)就可以\ndef find_primer(n):\n if n <= 3:\n return n > 1\n if n%6 != 1 and n%6 != 5:\n return False\n for i in range(5, int(n**0.5)+1, 6):\n if n%i == 0 or n %(i+2) == 0:\n return False\n return True\n\n\n# 计算约数个数\n# 时间复杂度 O( sqrt(n) )\ndef divisor2(num):\n count = 0\n sqrt = int(num ** 0.5)\n for x in range(1, sqrt + 1):\n if num % x == 0:\n count += 2\n print(x, num // x)\n return count - (sqrt ** 2 == num)\n\n\n# power of 4\nclass Solution:\n def isPowerOfFour(self, n: int) -> bool:\n if n <= 0:\n return False\n return n & (n - 1) == 0 and n & 0xAAAAAAAA == 0\n\n\n# power of 2\nclass Solution:\n def isPowerOfTwo(self, n: int) -> bool:\n return n > 0 and n & (n - 1) == 0\n\n\n# power of 2\nclass Solution:\n def isPowerOfTwo(self, n: int) -> bool:\n return n > 0 and (math.log10(n) / math.log10(2)) % 1 == 0\n\n\n# devide\nclass Solution:\n def divide(self, dividend: int, divisor: int) -> int:\n \"\"\"\n a / b = c\n \n keep subtracting b, a faster way is to -2*b, -4*b, -1024*b\n\n if a > 2 * b => c should be bigger than 2 (1<<1)\n if a 
> 4 * b => c should be bigger than 4 (1<<2)\n if a > 1024 * b => c should be bigger than 1024 (1<<10)\n\n a might == 1024*b + 4*b + 2*b\n c = (1024+4+2)\n\n 2 * b == b << 1\n 1024 * b == b << 10\n \n \"\"\"\n sig = (dividend < 0) == (divisor < 0)\n a, b, res = abs(dividend), abs(divisor), 0\n while a >= b:\n shift = 0\n while a >= b << (shift + 1):\n print(a, res)\n shift += 1\n res += 1 << shift\n a -= b << shift\n return min(res if sig else -res, (1 << 31) - 1)\n\n\n# power\nclass Solution:\n def myPow(self, x: float, n: int) -> float:\n if n == 0:\n return 1\n if n < 0:\n n = -n\n x = 1 / x\n if n & 1 == 0:\n return self.myPow(x * x, n >> 1)\n else:\n return x * self.myPow(x * x, n >> 1)\n\n# sqrt\nclass Solution:\n def mySqrt(self, x: int) -> int:\n l = 1\n r = x\n while l <= r:\n # print(l,r)\n mid = (l + r) // 2\n if mid * mid == x:\n return mid\n elif mid * mid > x:\n r = mid - 1\n else:\n l = mid + 1\n\n return r\n\n\n# root of number, x is the number and n is the root\ndef root(x, n):\n if x == 0:\n return 0\n \n low = 0\n hi = max(1, x)\n root = (low+hi) / 2.0\n \n while root - low >= 0.001:\n if root**n > x:\n hi = root\n elif root**n < x:\n low = root\n else:\n break\n root = (low+hi) / 2.0\n \n return root",
"step-ids": [
11,
17,
19,
21,
22
]
}
|
[
11,
17,
19,
21,
22
] |
import twitter
def twitter_authenticate():
return;
def identify_dupes():
return;
def remove_dupes():
return;
def get_tweets():
return;
|
normal
|
{
"blob_id": "d4683d055ca70f31b050f0d84cb93c030feb4593",
"index": 2179,
"step-1": "<mask token>\n\n\ndef twitter_authenticate():\n return\n\n\n<mask token>\n\n\ndef get_tweets():\n return\n",
"step-2": "<mask token>\n\n\ndef twitter_authenticate():\n return\n\n\n<mask token>\n\n\ndef remove_dupes():\n return\n\n\ndef get_tweets():\n return\n",
"step-3": "<mask token>\n\n\ndef twitter_authenticate():\n return\n\n\ndef identify_dupes():\n return\n\n\ndef remove_dupes():\n return\n\n\ndef get_tweets():\n return\n",
"step-4": "import twitter\n\n\ndef twitter_authenticate():\n return\n\n\ndef identify_dupes():\n return\n\n\ndef remove_dupes():\n return\n\n\ndef get_tweets():\n return\n",
"step-5": "import twitter\n\ndef twitter_authenticate():\n return;\n\ndef identify_dupes():\n return;\n\ndef remove_dupes():\n return;\n\ndef get_tweets():\n return;\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
import random
def sim_data():
# Parameters
n_samples = random.randint(500, 5000)
n_features = random.randint(5, 25)
n_informative = random.randint(5, n_features)
noise = random.uniform(0.5, 2)
# Simulate data
X, y = make_regression(n_samples=n_samples,
n_features=n_features,
n_informative=n_informative,
noise=noise)
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X, y)
# Param dict
params = {"n_samples": n_samples,
"n_features": n_features,
"n_informative": n_informative,
"noise": noise}
# Return
return X_train, y_train, X_test, y_test, params
|
normal
|
{
"blob_id": "c4aa5869d5f916f13aa924c19dc9792337619b31",
"index": 4011,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sim_data():\n n_samples = random.randint(500, 5000)\n n_features = random.randint(5, 25)\n n_informative = random.randint(5, n_features)\n noise = random.uniform(0.5, 2)\n X, y = make_regression(n_samples=n_samples, n_features=n_features,\n n_informative=n_informative, noise=noise)\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n params = {'n_samples': n_samples, 'n_features': n_features,\n 'n_informative': n_informative, 'noise': noise}\n return X_train, y_train, X_test, y_test, params\n",
"step-3": "from sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nimport random\n\n\ndef sim_data():\n n_samples = random.randint(500, 5000)\n n_features = random.randint(5, 25)\n n_informative = random.randint(5, n_features)\n noise = random.uniform(0.5, 2)\n X, y = make_regression(n_samples=n_samples, n_features=n_features,\n n_informative=n_informative, noise=noise)\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n params = {'n_samples': n_samples, 'n_features': n_features,\n 'n_informative': n_informative, 'noise': noise}\n return X_train, y_train, X_test, y_test, params\n",
"step-4": "from sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nimport random\n\ndef sim_data():\n\n # Parameters\n n_samples = random.randint(500, 5000)\n n_features = random.randint(5, 25)\n n_informative = random.randint(5, n_features)\n noise = random.uniform(0.5, 2)\n\n # Simulate data\n X, y = make_regression(n_samples=n_samples,\n n_features=n_features,\n n_informative=n_informative,\n noise=noise)\n\n # Train test split\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n\n # Param dict\n params = {\"n_samples\": n_samples,\n \"n_features\": n_features,\n \"n_informative\": n_informative,\n \"noise\": noise}\n\n # Return\n return X_train, y_train, X_test, y_test, params\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
IEX_CLOUD_API_TOKEN = 'Tpk_5d9dc536610243cda2c8ef4787d729b6'
|
normal
|
{
"blob_id": "86849d0e63cdb93a16497ca56ff9c64c15a60fa7",
"index": 4891,
"step-1": "<mask token>\n",
"step-2": "IEX_CLOUD_API_TOKEN = 'Tpk_5d9dc536610243cda2c8ef4787d729b6'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
model.load_state_dict(ckpt['model_state_dict'])
model.eval()
model.to(device)
<|reserved_special_token_0|>
model_face.eval()
<|reserved_special_token_0|>
for img in tqdm(video):
if img is not None:
annotation = model_face.predict_jsons(img)
max_thresh = annotation[0]['score']
bbox = annotation[0]['bbox']
if max_thresh > 0.3:
max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]
x1 = bbox[0]
y1 = bbox[1]
x2 = bbox[2]
y2 = bbox[3]
x1_face = bbox[0] - 20
y1_face = bbox[1] - 20
x2_face = bbox[2] + 20
y2_face = bbox[3] + 20
if x1_face > 0 and y1_face > 0:
img_face = img[y1_face:y2_face, x1_face:x2_face]
imageio.imwrite('face.jpg', img_face)
img_face = Image.fromarray(img_face)
img_face = transform(img_face)
img_face = torch.unsqueeze(img_face, 0)
img_face = img_face.to(device)
gen_pred, age_cls_pred, age_reg_pred = model(img_face)
_, gen_preds = torch.max(gen_pred, 1)
_, age_cls_pred = torch.max(age_cls_pred, 1)
if gen_preds.item() == 1:
text = f'M:{int(age_reg_pred.item() * 100)}'
cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),
color=(255, 0, 0), thickness=4)
cv2.putText(img, text, org=(x1, y1), fontFace=cv2.
FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0
), thickness=2, lineType=cv2.LINE_AA)
elif gen_preds.item() == 0:
text = f'F:{int(age_reg_pred.item() * 100)}'
cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),
color=(0, 0, 255), thickness=4)
cv2.putText(img, text, org=(x1, y1), fontFace=cv2.
FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255
), thickness=2, lineType=cv2.LINE_AA)
out_video.append_data(img)
out_video.close()
print('Done')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
transform = transforms.Compose([transforms.Resize((112, 112)), transforms.
ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023,
0.1994, 0.201))])
model = MixModel()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ckpt = torch.load('outputs_w_free/model_epoch_50.pth')
model.load_state_dict(ckpt['model_state_dict'])
model.eval()
model.to(device)
model_face = get_model('resnet50_2020-07-20', max_size=512, device='cuda:1')
model_face.eval()
detector = dlib.get_frontal_face_detector()
FPS = 30
out_video = imageio.get_writer('/home/cybercore/haimd/w_freeze_osaka.mp4',
format='mp4', mode='I', fps=FPS)
video = imageio.get_reader('/home/cybercore/haimd/osaka.mp4')
for img in tqdm(video):
if img is not None:
annotation = model_face.predict_jsons(img)
max_thresh = annotation[0]['score']
bbox = annotation[0]['bbox']
if max_thresh > 0.3:
max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]
x1 = bbox[0]
y1 = bbox[1]
x2 = bbox[2]
y2 = bbox[3]
x1_face = bbox[0] - 20
y1_face = bbox[1] - 20
x2_face = bbox[2] + 20
y2_face = bbox[3] + 20
if x1_face > 0 and y1_face > 0:
img_face = img[y1_face:y2_face, x1_face:x2_face]
imageio.imwrite('face.jpg', img_face)
img_face = Image.fromarray(img_face)
img_face = transform(img_face)
img_face = torch.unsqueeze(img_face, 0)
img_face = img_face.to(device)
gen_pred, age_cls_pred, age_reg_pred = model(img_face)
_, gen_preds = torch.max(gen_pred, 1)
_, age_cls_pred = torch.max(age_cls_pred, 1)
if gen_preds.item() == 1:
text = f'M:{int(age_reg_pred.item() * 100)}'
cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),
color=(255, 0, 0), thickness=4)
cv2.putText(img, text, org=(x1, y1), fontFace=cv2.
FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0
), thickness=2, lineType=cv2.LINE_AA)
elif gen_preds.item() == 0:
text = f'F:{int(age_reg_pred.item() * 100)}'
cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),
color=(0, 0, 255), thickness=4)
cv2.putText(img, text, org=(x1, y1), fontFace=cv2.
FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255
), thickness=2, lineType=cv2.LINE_AA)
out_video.append_data(img)
out_video.close()
print('Done')
<|reserved_special_token_1|>
import dlib
import cv2
import imageio
import torch
from PIL import Image
from model import AgeGenderModel
from mix_model import MixModel
from torchvision.transforms import transforms
from tqdm import tqdm
from retinaface.pre_trained_models import get_model
transform = transforms.Compose([transforms.Resize((112, 112)), transforms.
ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023,
0.1994, 0.201))])
model = MixModel()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ckpt = torch.load('outputs_w_free/model_epoch_50.pth')
model.load_state_dict(ckpt['model_state_dict'])
model.eval()
model.to(device)
model_face = get_model('resnet50_2020-07-20', max_size=512, device='cuda:1')
model_face.eval()
detector = dlib.get_frontal_face_detector()
FPS = 30
out_video = imageio.get_writer('/home/cybercore/haimd/w_freeze_osaka.mp4',
format='mp4', mode='I', fps=FPS)
video = imageio.get_reader('/home/cybercore/haimd/osaka.mp4')
for img in tqdm(video):
if img is not None:
annotation = model_face.predict_jsons(img)
max_thresh = annotation[0]['score']
bbox = annotation[0]['bbox']
if max_thresh > 0.3:
max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]
x1 = bbox[0]
y1 = bbox[1]
x2 = bbox[2]
y2 = bbox[3]
x1_face = bbox[0] - 20
y1_face = bbox[1] - 20
x2_face = bbox[2] + 20
y2_face = bbox[3] + 20
if x1_face > 0 and y1_face > 0:
img_face = img[y1_face:y2_face, x1_face:x2_face]
imageio.imwrite('face.jpg', img_face)
img_face = Image.fromarray(img_face)
img_face = transform(img_face)
img_face = torch.unsqueeze(img_face, 0)
img_face = img_face.to(device)
gen_pred, age_cls_pred, age_reg_pred = model(img_face)
_, gen_preds = torch.max(gen_pred, 1)
_, age_cls_pred = torch.max(age_cls_pred, 1)
if gen_preds.item() == 1:
text = f'M:{int(age_reg_pred.item() * 100)}'
cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),
color=(255, 0, 0), thickness=4)
cv2.putText(img, text, org=(x1, y1), fontFace=cv2.
FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0
), thickness=2, lineType=cv2.LINE_AA)
elif gen_preds.item() == 0:
text = f'F:{int(age_reg_pred.item() * 100)}'
cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),
color=(0, 0, 255), thickness=4)
cv2.putText(img, text, org=(x1, y1), fontFace=cv2.
FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255
), thickness=2, lineType=cv2.LINE_AA)
out_video.append_data(img)
out_video.close()
print('Done')
<|reserved_special_token_1|>
import dlib
import cv2
import imageio
import torch
from PIL import Image
from model import AgeGenderModel
from mix_model import MixModel
from torchvision.transforms import transforms
from tqdm import tqdm
from retinaface.pre_trained_models import get_model
transform = transforms.Compose([
transforms.Resize((112, 112)),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
])
# Load model age gender
model = MixModel()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ckpt = torch.load("outputs_w_free/model_epoch_50.pth")
model.load_state_dict(ckpt['model_state_dict'])
model.eval()
model.to(device)
model_face = get_model("resnet50_2020-07-20", max_size=512, device='cuda:1')
model_face.eval()
# load the detector
detector = dlib.get_frontal_face_detector()
FPS = 30
# read the video
out_video = imageio.get_writer("/home/cybercore/haimd/w_freeze_osaka.mp4", format='mp4', mode='I', fps=FPS)
video = imageio.get_reader("/home/cybercore/haimd/osaka.mp4")
for img in tqdm(video):
if img is not None:
# gray = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2GRAY)
# faces = detector(gray)
annotation = model_face.predict_jsons(img)
max_thresh = annotation[0]['score']
bbox = annotation[0]['bbox']
if max_thresh > 0.3:
max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]
# for face in faces:
# print(face)
x1 = bbox[0]
y1 = bbox[1]
x2 = bbox[2]
y2 = bbox[3]
x1_face = bbox[0]-20
y1_face = bbox[1]-20
x2_face = bbox[2]+20
y2_face = bbox[3]+20
if x1_face > 0 and y1_face > 0:
img_face = img[y1_face:y2_face, x1_face:x2_face]
imageio.imwrite('face.jpg', img_face)
img_face = Image.fromarray(img_face)
img_face = transform(img_face)
img_face = torch.unsqueeze(img_face, 0)
img_face = img_face.to(device)
gen_pred, age_cls_pred, age_reg_pred = model(img_face)
_, gen_preds = torch.max(gen_pred, 1)
_, age_cls_pred = torch.max(age_cls_pred, 1)
if gen_preds.item() == 1:
text = f'M:{int(age_reg_pred.item()*100)}'
cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(255,0,0), thickness=4)
cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1, color=(255, 0, 0), thickness=2, lineType=cv2.LINE_AA)
elif gen_preds.item() == 0:
text = f'F:{int(age_reg_pred.item()*100)}'
cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(0,0,255), thickness=4)
cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1, color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)
out_video.append_data(img)
out_video.close()
print('Done')
|
flexible
|
{
"blob_id": "1cc14836808d70c1e53a9ca948a52776ebc89f4a",
"index": 4624,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmodel.load_state_dict(ckpt['model_state_dict'])\nmodel.eval()\nmodel.to(device)\n<mask token>\nmodel_face.eval()\n<mask token>\nfor img in tqdm(video):\n if img is not None:\n annotation = model_face.predict_jsons(img)\n max_thresh = annotation[0]['score']\n bbox = annotation[0]['bbox']\n if max_thresh > 0.3:\n max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n x1_face = bbox[0] - 20\n y1_face = bbox[1] - 20\n x2_face = bbox[2] + 20\n y2_face = bbox[3] + 20\n if x1_face > 0 and y1_face > 0:\n img_face = img[y1_face:y2_face, x1_face:x2_face]\n imageio.imwrite('face.jpg', img_face)\n img_face = Image.fromarray(img_face)\n img_face = transform(img_face)\n img_face = torch.unsqueeze(img_face, 0)\n img_face = img_face.to(device)\n gen_pred, age_cls_pred, age_reg_pred = model(img_face)\n _, gen_preds = torch.max(gen_pred, 1)\n _, age_cls_pred = torch.max(age_cls_pred, 1)\n if gen_preds.item() == 1:\n text = f'M:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(255, 0, 0), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0\n ), thickness=2, lineType=cv2.LINE_AA)\n elif gen_preds.item() == 0:\n text = f'F:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(0, 0, 255), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255\n ), thickness=2, lineType=cv2.LINE_AA)\n out_video.append_data(img)\nout_video.close()\nprint('Done')\n",
"step-3": "<mask token>\ntransform = transforms.Compose([transforms.Resize((112, 112)), transforms.\n ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, \n 0.1994, 0.201))])\nmodel = MixModel()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nckpt = torch.load('outputs_w_free/model_epoch_50.pth')\nmodel.load_state_dict(ckpt['model_state_dict'])\nmodel.eval()\nmodel.to(device)\nmodel_face = get_model('resnet50_2020-07-20', max_size=512, device='cuda:1')\nmodel_face.eval()\ndetector = dlib.get_frontal_face_detector()\nFPS = 30\nout_video = imageio.get_writer('/home/cybercore/haimd/w_freeze_osaka.mp4',\n format='mp4', mode='I', fps=FPS)\nvideo = imageio.get_reader('/home/cybercore/haimd/osaka.mp4')\nfor img in tqdm(video):\n if img is not None:\n annotation = model_face.predict_jsons(img)\n max_thresh = annotation[0]['score']\n bbox = annotation[0]['bbox']\n if max_thresh > 0.3:\n max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n x1_face = bbox[0] - 20\n y1_face = bbox[1] - 20\n x2_face = bbox[2] + 20\n y2_face = bbox[3] + 20\n if x1_face > 0 and y1_face > 0:\n img_face = img[y1_face:y2_face, x1_face:x2_face]\n imageio.imwrite('face.jpg', img_face)\n img_face = Image.fromarray(img_face)\n img_face = transform(img_face)\n img_face = torch.unsqueeze(img_face, 0)\n img_face = img_face.to(device)\n gen_pred, age_cls_pred, age_reg_pred = model(img_face)\n _, gen_preds = torch.max(gen_pred, 1)\n _, age_cls_pred = torch.max(age_cls_pred, 1)\n if gen_preds.item() == 1:\n text = f'M:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(255, 0, 0), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0\n ), thickness=2, lineType=cv2.LINE_AA)\n elif gen_preds.item() == 0:\n text = f'F:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, 
y2),\n color=(0, 0, 255), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255\n ), thickness=2, lineType=cv2.LINE_AA)\n out_video.append_data(img)\nout_video.close()\nprint('Done')\n",
"step-4": "import dlib\nimport cv2\nimport imageio\nimport torch\nfrom PIL import Image\nfrom model import AgeGenderModel\nfrom mix_model import MixModel\nfrom torchvision.transforms import transforms\nfrom tqdm import tqdm\nfrom retinaface.pre_trained_models import get_model\ntransform = transforms.Compose([transforms.Resize((112, 112)), transforms.\n ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, \n 0.1994, 0.201))])\nmodel = MixModel()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nckpt = torch.load('outputs_w_free/model_epoch_50.pth')\nmodel.load_state_dict(ckpt['model_state_dict'])\nmodel.eval()\nmodel.to(device)\nmodel_face = get_model('resnet50_2020-07-20', max_size=512, device='cuda:1')\nmodel_face.eval()\ndetector = dlib.get_frontal_face_detector()\nFPS = 30\nout_video = imageio.get_writer('/home/cybercore/haimd/w_freeze_osaka.mp4',\n format='mp4', mode='I', fps=FPS)\nvideo = imageio.get_reader('/home/cybercore/haimd/osaka.mp4')\nfor img in tqdm(video):\n if img is not None:\n annotation = model_face.predict_jsons(img)\n max_thresh = annotation[0]['score']\n bbox = annotation[0]['bbox']\n if max_thresh > 0.3:\n max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n x1_face = bbox[0] - 20\n y1_face = bbox[1] - 20\n x2_face = bbox[2] + 20\n y2_face = bbox[3] + 20\n if x1_face > 0 and y1_face > 0:\n img_face = img[y1_face:y2_face, x1_face:x2_face]\n imageio.imwrite('face.jpg', img_face)\n img_face = Image.fromarray(img_face)\n img_face = transform(img_face)\n img_face = torch.unsqueeze(img_face, 0)\n img_face = img_face.to(device)\n gen_pred, age_cls_pred, age_reg_pred = model(img_face)\n _, gen_preds = torch.max(gen_pred, 1)\n _, age_cls_pred = torch.max(age_cls_pred, 1)\n if gen_preds.item() == 1:\n text = f'M:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(255, 0, 0), thickness=4)\n cv2.putText(img, text, 
org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0\n ), thickness=2, lineType=cv2.LINE_AA)\n elif gen_preds.item() == 0:\n text = f'F:{int(age_reg_pred.item() * 100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2),\n color=(0, 0, 255), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255\n ), thickness=2, lineType=cv2.LINE_AA)\n out_video.append_data(img)\nout_video.close()\nprint('Done')\n",
"step-5": "import dlib\nimport cv2\nimport imageio\nimport torch\nfrom PIL import Image \nfrom model import AgeGenderModel\nfrom mix_model import MixModel\nfrom torchvision.transforms import transforms\nfrom tqdm import tqdm\nfrom retinaface.pre_trained_models import get_model\n\n\ntransform = transforms.Compose([\n transforms.Resize((112, 112)),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465),\n (0.2023, 0.1994, 0.2010)),\n])\n\n# Load model age gender\nmodel = MixModel()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nckpt = torch.load(\"outputs_w_free/model_epoch_50.pth\")\n\nmodel.load_state_dict(ckpt['model_state_dict'])\nmodel.eval()\nmodel.to(device)\n\nmodel_face = get_model(\"resnet50_2020-07-20\", max_size=512, device='cuda:1')\nmodel_face.eval()\n\n# load the detector\ndetector = dlib.get_frontal_face_detector()\nFPS = 30\n# read the video\nout_video = imageio.get_writer(\"/home/cybercore/haimd/w_freeze_osaka.mp4\", format='mp4', mode='I', fps=FPS)\nvideo = imageio.get_reader(\"/home/cybercore/haimd/osaka.mp4\")\nfor img in tqdm(video):\n if img is not None:\n # gray = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2GRAY)\n \n # faces = detector(gray)\n \n annotation = model_face.predict_jsons(img)\n max_thresh = annotation[0]['score']\n bbox = annotation[0]['bbox']\n if max_thresh > 0.3:\n max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]\n \n \n # for face in faces:\n # print(face)\n x1 = bbox[0]\n y1 = bbox[1]\n x2 = bbox[2]\n y2 = bbox[3]\n \n x1_face = bbox[0]-20\n y1_face = bbox[1]-20\n x2_face = bbox[2]+20\n y2_face = bbox[3]+20\n if x1_face > 0 and y1_face > 0:\n \n img_face = img[y1_face:y2_face, x1_face:x2_face]\n \n imageio.imwrite('face.jpg', img_face)\n img_face = Image.fromarray(img_face)\n img_face = transform(img_face)\n\n img_face = torch.unsqueeze(img_face, 0)\n img_face = img_face.to(device) \n\n gen_pred, age_cls_pred, age_reg_pred = model(img_face)\n _, gen_preds = torch.max(gen_pred, 1)\n 
_, age_cls_pred = torch.max(age_cls_pred, 1)\n\n if gen_preds.item() == 1:\n text = f'M:{int(age_reg_pred.item()*100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(255,0,0), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1, color=(255, 0, 0), thickness=2, lineType=cv2.LINE_AA)\n elif gen_preds.item() == 0:\n text = f'F:{int(age_reg_pred.item()*100)}'\n cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(0,0,255), thickness=4)\n cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1, color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)\n out_video.append_data(img)\nout_video.close()\nprint('Done')\n \n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# written by Mohammad Shahrad @UBC
RECENT_NEWS_COUNT = 5
import json
with open("./all-news.json") as f:
allNews = json.load(f)
recent_news_counter = 0
with open("./recent-news.js", "w") as f:
f.write("document.write('\\\n")
f.write("<ul>\\\n")
for value in allNews.values():
f.write("<li>\\\n")
date, content = value["date"], value["content"]
date = date.replace("'", "\\'")
content = content.replace("'", "\\'")
f.write(date + " - " + content + "\\\n")
f.write("</li>\\\n")
recent_news_counter += 1
if recent_news_counter >= RECENT_NEWS_COUNT:
break
f.write("</ul>\\\n")
f.write("');")
with open("./all-news.js", "w") as f:
f.write("document.write('\\\n")
f.write("<ul>\\\n")
for value in allNews.values():
f.write("<li>\\\n")
date, content = value["date"], value["content"]
date = date.replace("'", "\\'")
content = content.replace("'", "\\'")
f.write(date + " - " + content + "\\\n")
f.write("</li>\\\n")
f.write("</ul>\\\n")
f.write("');")
|
normal
|
{
"blob_id": "6097840cdf4b42efaca3e197f88703d927abe889",
"index": 2548,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('./all-news.json') as f:\n allNews = json.load(f)\n<mask token>\nwith open('./recent-news.js', 'w') as f:\n f.write(\"document.write('\\\\\\n\")\n f.write('<ul>\\\\\\n')\n for value in allNews.values():\n f.write('<li>\\\\\\n')\n date, content = value['date'], value['content']\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + ' - ' + content + '\\\\\\n')\n f.write('</li>\\\\\\n')\n recent_news_counter += 1\n if recent_news_counter >= RECENT_NEWS_COUNT:\n break\n f.write('</ul>\\\\\\n')\n f.write(\"');\")\nwith open('./all-news.js', 'w') as f:\n f.write(\"document.write('\\\\\\n\")\n f.write('<ul>\\\\\\n')\n for value in allNews.values():\n f.write('<li>\\\\\\n')\n date, content = value['date'], value['content']\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + ' - ' + content + '\\\\\\n')\n f.write('</li>\\\\\\n')\n f.write('</ul>\\\\\\n')\n f.write(\"');\")\n",
"step-3": "RECENT_NEWS_COUNT = 5\n<mask token>\nwith open('./all-news.json') as f:\n allNews = json.load(f)\nrecent_news_counter = 0\nwith open('./recent-news.js', 'w') as f:\n f.write(\"document.write('\\\\\\n\")\n f.write('<ul>\\\\\\n')\n for value in allNews.values():\n f.write('<li>\\\\\\n')\n date, content = value['date'], value['content']\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + ' - ' + content + '\\\\\\n')\n f.write('</li>\\\\\\n')\n recent_news_counter += 1\n if recent_news_counter >= RECENT_NEWS_COUNT:\n break\n f.write('</ul>\\\\\\n')\n f.write(\"');\")\nwith open('./all-news.js', 'w') as f:\n f.write(\"document.write('\\\\\\n\")\n f.write('<ul>\\\\\\n')\n for value in allNews.values():\n f.write('<li>\\\\\\n')\n date, content = value['date'], value['content']\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + ' - ' + content + '\\\\\\n')\n f.write('</li>\\\\\\n')\n f.write('</ul>\\\\\\n')\n f.write(\"');\")\n",
"step-4": "RECENT_NEWS_COUNT = 5\nimport json\nwith open('./all-news.json') as f:\n allNews = json.load(f)\nrecent_news_counter = 0\nwith open('./recent-news.js', 'w') as f:\n f.write(\"document.write('\\\\\\n\")\n f.write('<ul>\\\\\\n')\n for value in allNews.values():\n f.write('<li>\\\\\\n')\n date, content = value['date'], value['content']\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + ' - ' + content + '\\\\\\n')\n f.write('</li>\\\\\\n')\n recent_news_counter += 1\n if recent_news_counter >= RECENT_NEWS_COUNT:\n break\n f.write('</ul>\\\\\\n')\n f.write(\"');\")\nwith open('./all-news.js', 'w') as f:\n f.write(\"document.write('\\\\\\n\")\n f.write('<ul>\\\\\\n')\n for value in allNews.values():\n f.write('<li>\\\\\\n')\n date, content = value['date'], value['content']\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + ' - ' + content + '\\\\\\n')\n f.write('</li>\\\\\\n')\n f.write('</ul>\\\\\\n')\n f.write(\"');\")\n",
"step-5": "# written by Mohammad Shahrad @UBC\n\nRECENT_NEWS_COUNT = 5\n\nimport json\n\nwith open(\"./all-news.json\") as f:\n allNews = json.load(f)\n\nrecent_news_counter = 0\nwith open(\"./recent-news.js\", \"w\") as f:\n f.write(\"document.write('\\\\\\n\")\n f.write(\"<ul>\\\\\\n\")\n for value in allNews.values():\n f.write(\"<li>\\\\\\n\")\n date, content = value[\"date\"], value[\"content\"]\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + \" - \" + content + \"\\\\\\n\")\n f.write(\"</li>\\\\\\n\")\n recent_news_counter += 1\n if recent_news_counter >= RECENT_NEWS_COUNT:\n break\n f.write(\"</ul>\\\\\\n\")\n f.write(\"');\")\n\nwith open(\"./all-news.js\", \"w\") as f:\n f.write(\"document.write('\\\\\\n\")\n f.write(\"<ul>\\\\\\n\")\n for value in allNews.values():\n f.write(\"<li>\\\\\\n\")\n date, content = value[\"date\"], value[\"content\"]\n date = date.replace(\"'\", \"\\\\'\")\n content = content.replace(\"'\", \"\\\\'\")\n f.write(date + \" - \" + content + \"\\\\\\n\")\n f.write(\"</li>\\\\\\n\")\n f.write(\"</ul>\\\\\\n\")\n f.write(\"');\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
from calc.models import CalcResult
class MyAdmin(admin.ModelAdmin):
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class CalcResultAdmin(MyAdmin):
list_display = ('result', 'message', 'time',)
search_fields = ('result', 'message', 'time',)
admin.site.register(CalcResult, CalcResultAdmin)
|
normal
|
{
"blob_id": "e2573a5dc507e9aeb811fbc254129aeb6e54cc0b",
"index": 2483,
"step-1": "<mask token>\n\n\nclass MyAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n\n\nclass CalcResultAdmin(MyAdmin):\n list_display = 'result', 'message', 'time'\n search_fields = 'result', 'message', 'time'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyAdmin(admin.ModelAdmin):\n <mask token>\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nclass CalcResultAdmin(MyAdmin):\n list_display = 'result', 'message', 'time'\n search_fields = 'result', 'message', 'time'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyAdmin(admin.ModelAdmin):\n\n def has_add_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nclass CalcResultAdmin(MyAdmin):\n list_display = 'result', 'message', 'time'\n search_fields = 'result', 'message', 'time'\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass MyAdmin(admin.ModelAdmin):\n\n def has_add_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nclass CalcResultAdmin(MyAdmin):\n list_display = 'result', 'message', 'time'\n search_fields = 'result', 'message', 'time'\n\n\nadmin.site.register(CalcResult, CalcResultAdmin)\n",
"step-5": "from django.contrib import admin\n\nfrom calc.models import CalcResult\n\n\nclass MyAdmin(admin.ModelAdmin):\n def has_add_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nclass CalcResultAdmin(MyAdmin):\n list_display = ('result', 'message', 'time',)\n search_fields = ('result', 'message', 'time',)\n\nadmin.site.register(CalcResult, CalcResultAdmin)\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
<|reserved_special_token_0|>
class graphingwidget(QtGui.QWidget):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, reactor, configpath):
super(graphingwidget, self).__init__()
self.reactor = reactor
self.configpath = configpath
self.initialize()
self.timeoffset = 200
def mouseMoved(self, evt):
pos = evt
if self.figure.sceneBoundingRect().contains(pos):
mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)
index = int(mousePoint.x())
self.label.setPos(mousePoint)
self.label.setText('{:d}'.format(int(mousePoint.x())))
def initialize(self):
sys.path.append(self.configpath)
global hardwareConfiguration
from hardwareConfiguration import hardwareConfiguration
self.ddslist = hardwareConfiguration.ddsDict
self.do_layout()
self.figure.scene().sigMouseMoved.connect(self.mouseMoved)
def do_layout(self):
yaxis = pg.AxisItem(orientation='left')
ticks = []
sorteddict = sorted(self.ddslist.items(), key=lambda x: x[1].
channelnumber)
for i in range(0, 17):
if i < len(sorteddict):
string = sorteddict[i][0]
else:
string = ''
ticks.append((i + 0.5, string))
yaxis.setTicks([ticks])
self.figure = pg.PlotWidget(axisItems={'left': yaxis})
self.layoutVertical = QtGui.QVBoxLayout(self)
self.layoutVertical.addWidget(self.figure)
for adds, config in self.ddslist.iteritems():
self.figure.addItem(pg.PlotCurveItem(range(10), [1] * 10, pen='w'))
self.figure.setYRange(0, 17)
self.figure.setMouseEnabled(y=False)
self.figure.showGrid(x=True, y=True, alpha=0.4)
self.label = pg.TextItem(anchor=(0, 1))
self.figure.plotItem.addItem(self.label)
@pyqtSlot(list, int, list)
def do_sequence(self, sequence, timelength, steadystatenames):
xdatalist = []
ydatalist = []
for achannelname, adds in self.ddslist.iteritems():
channelpulses = [i for i in sequence if i[0] == achannelname]
channelpulses.sort(key=lambda name: name[1]['ms'])
starttimes = []
endtimes = []
frequencies = []
amplitudes = []
if achannelname in steadystatenames:
starttimes.append(-50)
endtimes.append(0)
for apulse in channelpulses:
starttimes.append(apulse[1]['ms'])
endtimes.append((apulse[1] + apulse[2])['ms'])
yhigh = 0.75 + adds.channelnumber
ylow = 0.25 + adds.channelnumber
if len(starttimes) < 0:
xdata = [starttimes[0] + self.timeoffset]
ydata = [yhigh]
else:
xdata = [self.timeoffset]
ydata = [ylow]
for i in range(len(starttimes)):
xdata += [starttimes[i] + self.timeoffset] * 2 + [endtimes[
i] + self.timeoffset] * 2
if ydata[-1] == ylow:
ydata += [ylow, yhigh, yhigh, ylow]
else:
ydata += [yhigh, ylow, ylow, yhigh]
xdata.append(timelength)
ydata.append(ylow)
xdatalist.append(xdata)
ydatalist.append(ydata)
self.plot(xdatalist, ydatalist)
def plot(self, xlist, ylist):
self.figure.clear()
self.figure.addItem(self.label)
for i in range(len(xlist)):
xdata = xlist[i]
ydata = ylist[i]
if len(xdata) > 1:
self.figure.addItem(pg.PlotCurveItem(xdata, ydata, pen='w'))
self.figure.addItem(pg.InfiniteLine(self.timeoffset, pen=pg.mkPen(
'r', style=Qt.DashLine)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class graphingwidget(QtGui.QWidget):
SIGNALID = 104692
update_signal = pyqtSignal(list)
def __init__(self, reactor, configpath):
super(graphingwidget, self).__init__()
self.reactor = reactor
self.configpath = configpath
self.initialize()
self.timeoffset = 200
def mouseMoved(self, evt):
pos = evt
if self.figure.sceneBoundingRect().contains(pos):
mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)
index = int(mousePoint.x())
self.label.setPos(mousePoint)
self.label.setText('{:d}'.format(int(mousePoint.x())))
def initialize(self):
sys.path.append(self.configpath)
global hardwareConfiguration
from hardwareConfiguration import hardwareConfiguration
self.ddslist = hardwareConfiguration.ddsDict
self.do_layout()
self.figure.scene().sigMouseMoved.connect(self.mouseMoved)
def do_layout(self):
yaxis = pg.AxisItem(orientation='left')
ticks = []
sorteddict = sorted(self.ddslist.items(), key=lambda x: x[1].
channelnumber)
for i in range(0, 17):
if i < len(sorteddict):
string = sorteddict[i][0]
else:
string = ''
ticks.append((i + 0.5, string))
yaxis.setTicks([ticks])
self.figure = pg.PlotWidget(axisItems={'left': yaxis})
self.layoutVertical = QtGui.QVBoxLayout(self)
self.layoutVertical.addWidget(self.figure)
for adds, config in self.ddslist.iteritems():
self.figure.addItem(pg.PlotCurveItem(range(10), [1] * 10, pen='w'))
self.figure.setYRange(0, 17)
self.figure.setMouseEnabled(y=False)
self.figure.showGrid(x=True, y=True, alpha=0.4)
self.label = pg.TextItem(anchor=(0, 1))
self.figure.plotItem.addItem(self.label)
@pyqtSlot(list, int, list)
def do_sequence(self, sequence, timelength, steadystatenames):
xdatalist = []
ydatalist = []
for achannelname, adds in self.ddslist.iteritems():
channelpulses = [i for i in sequence if i[0] == achannelname]
channelpulses.sort(key=lambda name: name[1]['ms'])
starttimes = []
endtimes = []
frequencies = []
amplitudes = []
if achannelname in steadystatenames:
starttimes.append(-50)
endtimes.append(0)
for apulse in channelpulses:
starttimes.append(apulse[1]['ms'])
endtimes.append((apulse[1] + apulse[2])['ms'])
yhigh = 0.75 + adds.channelnumber
ylow = 0.25 + adds.channelnumber
if len(starttimes) < 0:
xdata = [starttimes[0] + self.timeoffset]
ydata = [yhigh]
else:
xdata = [self.timeoffset]
ydata = [ylow]
for i in range(len(starttimes)):
xdata += [starttimes[i] + self.timeoffset] * 2 + [endtimes[
i] + self.timeoffset] * 2
if ydata[-1] == ylow:
ydata += [ylow, yhigh, yhigh, ylow]
else:
ydata += [yhigh, ylow, ylow, yhigh]
xdata.append(timelength)
ydata.append(ylow)
xdatalist.append(xdata)
ydatalist.append(ydata)
self.plot(xdatalist, ydatalist)
def plot(self, xlist, ylist):
self.figure.clear()
self.figure.addItem(self.label)
for i in range(len(xlist)):
xdata = xlist[i]
ydata = ylist[i]
if len(xdata) > 1:
self.figure.addItem(pg.PlotCurveItem(xdata, ydata, pen='w'))
self.figure.addItem(pg.InfiniteLine(self.timeoffset, pen=pg.mkPen(
'r', style=Qt.DashLine)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
global harwareConfiguration
class graphingwidget(QtGui.QWidget):
SIGNALID = 104692
update_signal = pyqtSignal(list)
def __init__(self, reactor, configpath):
super(graphingwidget, self).__init__()
self.reactor = reactor
self.configpath = configpath
self.initialize()
self.timeoffset = 200
def mouseMoved(self, evt):
pos = evt
if self.figure.sceneBoundingRect().contains(pos):
mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)
index = int(mousePoint.x())
self.label.setPos(mousePoint)
self.label.setText('{:d}'.format(int(mousePoint.x())))
def initialize(self):
sys.path.append(self.configpath)
global hardwareConfiguration
from hardwareConfiguration import hardwareConfiguration
self.ddslist = hardwareConfiguration.ddsDict
self.do_layout()
self.figure.scene().sigMouseMoved.connect(self.mouseMoved)
def do_layout(self):
yaxis = pg.AxisItem(orientation='left')
ticks = []
sorteddict = sorted(self.ddslist.items(), key=lambda x: x[1].
channelnumber)
for i in range(0, 17):
if i < len(sorteddict):
string = sorteddict[i][0]
else:
string = ''
ticks.append((i + 0.5, string))
yaxis.setTicks([ticks])
self.figure = pg.PlotWidget(axisItems={'left': yaxis})
self.layoutVertical = QtGui.QVBoxLayout(self)
self.layoutVertical.addWidget(self.figure)
for adds, config in self.ddslist.iteritems():
self.figure.addItem(pg.PlotCurveItem(range(10), [1] * 10, pen='w'))
self.figure.setYRange(0, 17)
self.figure.setMouseEnabled(y=False)
self.figure.showGrid(x=True, y=True, alpha=0.4)
self.label = pg.TextItem(anchor=(0, 1))
self.figure.plotItem.addItem(self.label)
@pyqtSlot(list, int, list)
def do_sequence(self, sequence, timelength, steadystatenames):
xdatalist = []
ydatalist = []
for achannelname, adds in self.ddslist.iteritems():
channelpulses = [i for i in sequence if i[0] == achannelname]
channelpulses.sort(key=lambda name: name[1]['ms'])
starttimes = []
endtimes = []
frequencies = []
amplitudes = []
if achannelname in steadystatenames:
starttimes.append(-50)
endtimes.append(0)
for apulse in channelpulses:
starttimes.append(apulse[1]['ms'])
endtimes.append((apulse[1] + apulse[2])['ms'])
yhigh = 0.75 + adds.channelnumber
ylow = 0.25 + adds.channelnumber
if len(starttimes) < 0:
xdata = [starttimes[0] + self.timeoffset]
ydata = [yhigh]
else:
xdata = [self.timeoffset]
ydata = [ylow]
for i in range(len(starttimes)):
xdata += [starttimes[i] + self.timeoffset] * 2 + [endtimes[
i] + self.timeoffset] * 2
if ydata[-1] == ylow:
ydata += [ylow, yhigh, yhigh, ylow]
else:
ydata += [yhigh, ylow, ylow, yhigh]
xdata.append(timelength)
ydata.append(ylow)
xdatalist.append(xdata)
ydatalist.append(ydata)
self.plot(xdatalist, ydatalist)
def plot(self, xlist, ylist):
self.figure.clear()
self.figure.addItem(self.label)
for i in range(len(xlist)):
xdata = xlist[i]
ydata = ylist[i]
if len(xdata) > 1:
self.figure.addItem(pg.PlotCurveItem(xdata, ydata, pen='w'))
self.figure.addItem(pg.InfiniteLine(self.timeoffset, pen=pg.mkPen(
'r', style=Qt.DashLine)))
<|reserved_special_token_1|>
from PyQt4 import QtGui
from PyQt4.QtCore import pyqtSignal, pyqtSlot, QObject, Qt
from twisted.internet.defer import inlineCallbacks
import numpy as np
from connection import connection
import pyqtgraph as pg
from pyqtgraph.SignalProxy import SignalProxy
import sys
import time
global harwareConfiguration
class graphingwidget(QtGui.QWidget):
    """Widget that renders a pulse-sequence timing diagram.

    One horizontal lane per DDS channel (taken from the hardware
    configuration); each pulse is drawn as a square step between the
    lane's low and high levels.  A dashed red line marks the time
    origin (``timeoffset``).
    """
    # LabRAD-style signal id / Qt signal for pushing updates to listeners.
    SIGNALID = 104692
    update_signal = pyqtSignal(list)
    def __init__(self, reactor, configpath):
        # reactor: twisted reactor (kept for Qt/twisted integration).
        # configpath: directory containing hardwareConfiguration.py.
        super(graphingwidget, self).__init__()
        self.reactor = reactor
        self.configpath = configpath
        self.initialize()
        # Horizontal offset added to every pulse time so steady-state
        # lead-in (which starts at -50) can be drawn left of the origin.
        self.timeoffset = 200
    def mouseMoved(self, evt):
        """Track the cursor and display its x (time) coordinate as a label."""
        pos = evt
        if self.figure.sceneBoundingRect().contains(pos):
            # Convert from scene (pixel) to plot (data) coordinates.
            mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)
            index = int(mousePoint.x())  # NOTE(review): computed but unused
            self.label.setPos(mousePoint)
            self.label.setText('{:d}'.format(int(mousePoint.x())))
    def initialize(self):
        """Load the DDS channel table from configpath and build the UI."""
        sys.path.append(self.configpath)
        # Re-export the imported module at module scope so other code in
        # this module can reference it.
        global hardwareConfiguration
        from hardwareConfiguration import hardwareConfiguration
        self.ddslist = hardwareConfiguration.ddsDict
        self.do_layout()
        self.figure.scene().sigMouseMoved.connect(self.mouseMoved)
    def do_layout(self):
        """Create the plot with one labelled y-axis tick per DDS channel."""
        yaxis = pg.AxisItem(orientation='left')
        ticks = []
        # Order channels by configured channel number so lane i matches
        # channelnumber i.
        sorteddict = sorted(self.ddslist.items(), key=lambda x: x[1].
            channelnumber)
        for i in range(0, 17):
            if i < len(sorteddict):
                string = sorteddict[i][0]
            else:
                string = ''
            # Tick label sits at the lane centre (i + 0.5).
            ticks.append((i + 0.5, string))
        yaxis.setTicks([ticks])
        self.figure = pg.PlotWidget(axisItems={'left': yaxis})
        self.layoutVertical = QtGui.QVBoxLayout(self)
        self.layoutVertical.addWidget(self.figure)
        # Placeholder curve per channel (Python 2 dict.iteritems()).
        for adds, config in self.ddslist.iteritems():
            self.figure.addItem(pg.PlotCurveItem(range(10), [1] * 10, pen='w'))
        self.figure.setYRange(0, 17)
        # Only x (time) panning/zooming makes sense for a timing diagram.
        self.figure.setMouseEnabled(y=False)
        self.figure.showGrid(x=True, y=True, alpha=0.4)
        self.label = pg.TextItem(anchor=(0, 1))
        self.figure.plotItem.addItem(self.label)
    @pyqtSlot(list, int, list)
    def do_sequence(self, sequence, timelength, steadystatenames):
        """Convert a pulse sequence into square-wave x/y traces and plot them.

        sequence: iterable of (channel_name, start, duration) where start
            and duration are unit values indexable by ['ms'].
        timelength: total sequence length used to close each trace.
        steadystatenames: channels already on before t=0; they receive a
            synthetic pulse from -50 to 0.
        """
        xdatalist = []
        ydatalist = []
        for achannelname, adds in self.ddslist.iteritems():
            # Pulses belonging to this channel, ordered by start time.
            channelpulses = [i for i in sequence if i[0] == achannelname]
            channelpulses.sort(key=lambda name: name[1]['ms'])
            starttimes = []
            endtimes = []
            frequencies = []  # NOTE(review): never populated; unused
            amplitudes = []  # NOTE(review): never populated; unused
            if achannelname in steadystatenames:
                starttimes.append(-50)
                endtimes.append(0)
            for apulse in channelpulses:
                starttimes.append(apulse[1]['ms'])
                endtimes.append((apulse[1] + apulse[2])['ms'])
            # The trace toggles between ylow and yhigh within the lane
            # reserved for this channel number.
            yhigh = 0.75 + adds.channelnumber
            ylow = 0.25 + adds.channelnumber
            # NOTE(review): len() is never negative, so this branch is dead
            # and every trace starts low at the origin; the intent was
            # probably `> 0` (start high at the first pulse) -- confirm.
            if len(starttimes) < 0:
                xdata = [starttimes[0] + self.timeoffset]
                ydata = [yhigh]
            else:
                xdata = [self.timeoffset]
                ydata = [ylow]
            for i in range(len(starttimes)):
                # Duplicate x at each rising and falling edge so the drawn
                # curve has vertical steps.
                xdata += [starttimes[i] + self.timeoffset] * 2 + [endtimes[
                    i] + self.timeoffset] * 2
                if ydata[-1] == ylow:
                    ydata += [ylow, yhigh, yhigh, ylow]
                else:
                    ydata += [yhigh, ylow, ylow, yhigh]
            # Hold the final (low) level until the end of the sequence.
            xdata.append(timelength)
            ydata.append(ylow)
            xdatalist.append(xdata)
            ydatalist.append(ydata)
        self.plot(xdatalist, ydatalist)
    def plot(self, xlist, ylist):
        """Redraw all channel traces plus the dashed time-origin marker."""
        self.figure.clear()
        # clear() removes the hover label too, so re-add it first.
        self.figure.addItem(self.label)
        for i in range(len(xlist)):
            xdata = xlist[i]
            ydata = ylist[i]
            if len(xdata) > 1:
                self.figure.addItem(pg.PlotCurveItem(xdata, ydata, pen='w'))
        self.figure.addItem(pg.InfiniteLine(self.timeoffset, pen=pg.mkPen(
            'r', style=Qt.DashLine)))
<|reserved_special_token_1|>
from PyQt4 import QtGui
from PyQt4.QtCore import pyqtSignal, pyqtSlot, QObject, Qt
from twisted.internet.defer import inlineCallbacks
import numpy as np
from connection import connection
import pyqtgraph as pg
from pyqtgraph.SignalProxy import SignalProxy
import sys
import time
# NOTE(review): a module-level `global` statement is a no-op, and the name
# here is misspelled ("harware"); the effective re-export happens inside
# graphingwidget.initialize() via `global hardwareConfiguration`.
global harwareConfiguration
class graphingwidget(QtGui.QWidget):
    """Widget that renders a pulse-sequence timing diagram.

    One horizontal lane per DDS channel (taken from the hardware
    configuration); each pulse is drawn as a square step between the
    lane's low and high levels.  A dashed red line marks the time
    origin (``timeoffset``).
    """
    # LabRAD-style signal id / Qt signal for pushing updates to listeners.
    SIGNALID = 104692
    update_signal = pyqtSignal(list)
    def __init__(self,reactor, configpath):
        # reactor: twisted reactor (kept for Qt/twisted integration).
        # configpath: directory containing hardwareConfiguration.py.
        super(graphingwidget,self).__init__()
        self.reactor = reactor
        self.configpath = configpath
        self.initialize()
        # Horizontal offset added to every pulse time so steady-state
        # lead-in (which starts at -50) can be drawn left of the origin.
        self.timeoffset = 200
    def mouseMoved(self,evt):
        """Track the cursor and display its x (time) coordinate as a label."""
        pos = evt
        if self.figure.sceneBoundingRect().contains(pos):
            # Convert from scene (pixel) to plot (data) coordinates.
            mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)
            index = int(mousePoint.x())  # NOTE(review): computed but unused
            self.label.setPos(mousePoint)
            self.label.setText("{:d}".format(int(mousePoint.x())))
    def initialize(self):
        """Load the DDS channel table from configpath and build the UI."""
        sys.path.append(self.configpath)
        # Re-export the imported module at module scope so other code in
        # this module can reference it.
        global hardwareConfiguration
        from hardwareConfiguration import hardwareConfiguration
        self.ddslist = hardwareConfiguration.ddsDict
        self.do_layout()
        self.figure.scene().sigMouseMoved.connect(self.mouseMoved)
    def do_layout(self):
        """Create the plot with one labelled y-axis tick per DDS channel."""
        yaxis = pg.AxisItem(orientation='left')
        ticks = []
        # Order channels by configured channel number so lane i matches
        # channelnumber i.
        sorteddict = sorted(self.ddslist.items(),key =lambda x: x[1].channelnumber)
        for i in range(0,17):
            if i < len(sorteddict):
                string = sorteddict[i][0]
            else:
                string = ""
            # Tick label sits at the lane centre (i + 0.5).
            ticks.append((i+0.5,string))
        yaxis.setTicks([ticks])
        self.figure = pg.PlotWidget(axisItems ={'left':yaxis})
        self.layoutVertical = QtGui.QVBoxLayout(self)
        self.layoutVertical.addWidget(self.figure)
        # Placeholder curve per channel (Python 2 dict.iteritems()).
        for adds,config in self.ddslist.iteritems():
            self.figure.addItem(pg.PlotCurveItem(range(10),[1]*10,pen='w'))
        self.figure.setYRange(0,17)
        # Only x (time) panning/zooming makes sense for a timing diagram.
        self.figure.setMouseEnabled(y=False)
        self.figure.showGrid(x=True,y=True,alpha=0.4)
        self.label = pg.TextItem(anchor=(0,1))
        self.figure.plotItem.addItem(self.label)
    @pyqtSlot(list,int,list)
    def do_sequence(self,sequence,timelength,steadystatenames):
        """Convert a pulse sequence into square-wave x/y traces and plot them.

        sequence: iterable of (channel_name, start, duration) where start
            and duration are unit values indexable by ['ms'].
        timelength: total sequence length used to close each trace.
        steadystatenames: channels already on before t=0; they receive a
            synthetic pulse from -50 to 0.
        """
        xdatalist = []
        ydatalist = []
        for achannelname, adds in self.ddslist.iteritems():
            # Pulses belonging to this channel, ordered by start time.
            channelpulses = [i for i in sequence if i[0] == achannelname]
            channelpulses.sort(key= lambda name: name[1]['ms'])
            starttimes = []
            endtimes = []
            frequencies = []  # NOTE(review): never populated; unused
            amplitudes = []  # NOTE(review): never populated; unused
            if achannelname in steadystatenames:
                starttimes.append(-50)
                endtimes.append(0)
            for apulse in channelpulses:
                starttimes.append(apulse[1]['ms'])
                endtimes.append((apulse[1]+ apulse[2])['ms'])
            # The trace toggles between ylow and yhigh within the lane
            # reserved for this channel number.
            yhigh = 0.75+adds.channelnumber
            ylow = 0.25+adds.channelnumber
            # NOTE(review): len() is never negative, so this branch is dead
            # and every trace starts low at the origin; the intent was
            # probably `> 0` (start high at the first pulse) -- confirm.
            if len(starttimes) < 0:
                xdata = [starttimes[0]+self.timeoffset]
                ydata = [yhigh]
            else:
                xdata = [self.timeoffset]
                ydata = [ylow]
            for i in range(len(starttimes)):
                # Duplicate x at each rising and falling edge so the drawn
                # curve has vertical steps.
                xdata += [starttimes[i]+self.timeoffset]*2 + [endtimes[i]+self.timeoffset]*2
                if ydata[-1] == ylow:
                    ydata += [ylow,yhigh,yhigh,ylow]
                else:
                    ydata += [yhigh,ylow,ylow,yhigh]
            # Hold the final (low) level until the end of the sequence.
            xdata.append(timelength)
            ydata.append(ylow)
            xdatalist.append(xdata)
            ydatalist.append(ydata)
        self.plot(xdatalist,ydatalist)
    def plot(self,xlist,ylist):
        """Redraw all channel traces plus the dashed time-origin marker."""
        self.figure.clear()
        # clear() removes the hover label too, so re-add it first.
        self.figure.addItem(self.label)
        for i in range(len(xlist)):
            xdata = xlist[i]
            ydata = ylist[i]
            if len(xdata)>1:
                self.figure.addItem(pg.PlotCurveItem(xdata,ydata,pen='w'))
        self.figure.addItem(pg.InfiniteLine(self.timeoffset,pen=pg.mkPen('r',style=Qt.DashLine)))
|
flexible
|
{
"blob_id": "8173afbd82b8da04db4625ac686c0d052e65a21c",
"index": 9470,
"step-1": "<mask token>\n\n\nclass graphingwidget(QtGui.QWidget):\n <mask token>\n <mask token>\n\n def __init__(self, reactor, configpath):\n super(graphingwidget, self).__init__()\n self.reactor = reactor\n self.configpath = configpath\n self.initialize()\n self.timeoffset = 200\n\n def mouseMoved(self, evt):\n pos = evt\n if self.figure.sceneBoundingRect().contains(pos):\n mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)\n index = int(mousePoint.x())\n self.label.setPos(mousePoint)\n self.label.setText('{:d}'.format(int(mousePoint.x())))\n\n def initialize(self):\n sys.path.append(self.configpath)\n global hardwareConfiguration\n from hardwareConfiguration import hardwareConfiguration\n self.ddslist = hardwareConfiguration.ddsDict\n self.do_layout()\n self.figure.scene().sigMouseMoved.connect(self.mouseMoved)\n\n def do_layout(self):\n yaxis = pg.AxisItem(orientation='left')\n ticks = []\n sorteddict = sorted(self.ddslist.items(), key=lambda x: x[1].\n channelnumber)\n for i in range(0, 17):\n if i < len(sorteddict):\n string = sorteddict[i][0]\n else:\n string = ''\n ticks.append((i + 0.5, string))\n yaxis.setTicks([ticks])\n self.figure = pg.PlotWidget(axisItems={'left': yaxis})\n self.layoutVertical = QtGui.QVBoxLayout(self)\n self.layoutVertical.addWidget(self.figure)\n for adds, config in self.ddslist.iteritems():\n self.figure.addItem(pg.PlotCurveItem(range(10), [1] * 10, pen='w'))\n self.figure.setYRange(0, 17)\n self.figure.setMouseEnabled(y=False)\n self.figure.showGrid(x=True, y=True, alpha=0.4)\n self.label = pg.TextItem(anchor=(0, 1))\n self.figure.plotItem.addItem(self.label)\n\n @pyqtSlot(list, int, list)\n def do_sequence(self, sequence, timelength, steadystatenames):\n xdatalist = []\n ydatalist = []\n for achannelname, adds in self.ddslist.iteritems():\n channelpulses = [i for i in sequence if i[0] == achannelname]\n channelpulses.sort(key=lambda name: name[1]['ms'])\n starttimes = []\n endtimes = []\n frequencies = []\n amplitudes = 
[]\n if achannelname in steadystatenames:\n starttimes.append(-50)\n endtimes.append(0)\n for apulse in channelpulses:\n starttimes.append(apulse[1]['ms'])\n endtimes.append((apulse[1] + apulse[2])['ms'])\n yhigh = 0.75 + adds.channelnumber\n ylow = 0.25 + adds.channelnumber\n if len(starttimes) < 0:\n xdata = [starttimes[0] + self.timeoffset]\n ydata = [yhigh]\n else:\n xdata = [self.timeoffset]\n ydata = [ylow]\n for i in range(len(starttimes)):\n xdata += [starttimes[i] + self.timeoffset] * 2 + [endtimes[\n i] + self.timeoffset] * 2\n if ydata[-1] == ylow:\n ydata += [ylow, yhigh, yhigh, ylow]\n else:\n ydata += [yhigh, ylow, ylow, yhigh]\n xdata.append(timelength)\n ydata.append(ylow)\n xdatalist.append(xdata)\n ydatalist.append(ydata)\n self.plot(xdatalist, ydatalist)\n\n def plot(self, xlist, ylist):\n self.figure.clear()\n self.figure.addItem(self.label)\n for i in range(len(xlist)):\n xdata = xlist[i]\n ydata = ylist[i]\n if len(xdata) > 1:\n self.figure.addItem(pg.PlotCurveItem(xdata, ydata, pen='w'))\n self.figure.addItem(pg.InfiniteLine(self.timeoffset, pen=pg.mkPen(\n 'r', style=Qt.DashLine)))\n",
"step-2": "<mask token>\n\n\nclass graphingwidget(QtGui.QWidget):\n SIGNALID = 104692\n update_signal = pyqtSignal(list)\n\n def __init__(self, reactor, configpath):\n super(graphingwidget, self).__init__()\n self.reactor = reactor\n self.configpath = configpath\n self.initialize()\n self.timeoffset = 200\n\n def mouseMoved(self, evt):\n pos = evt\n if self.figure.sceneBoundingRect().contains(pos):\n mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)\n index = int(mousePoint.x())\n self.label.setPos(mousePoint)\n self.label.setText('{:d}'.format(int(mousePoint.x())))\n\n def initialize(self):\n sys.path.append(self.configpath)\n global hardwareConfiguration\n from hardwareConfiguration import hardwareConfiguration\n self.ddslist = hardwareConfiguration.ddsDict\n self.do_layout()\n self.figure.scene().sigMouseMoved.connect(self.mouseMoved)\n\n def do_layout(self):\n yaxis = pg.AxisItem(orientation='left')\n ticks = []\n sorteddict = sorted(self.ddslist.items(), key=lambda x: x[1].\n channelnumber)\n for i in range(0, 17):\n if i < len(sorteddict):\n string = sorteddict[i][0]\n else:\n string = ''\n ticks.append((i + 0.5, string))\n yaxis.setTicks([ticks])\n self.figure = pg.PlotWidget(axisItems={'left': yaxis})\n self.layoutVertical = QtGui.QVBoxLayout(self)\n self.layoutVertical.addWidget(self.figure)\n for adds, config in self.ddslist.iteritems():\n self.figure.addItem(pg.PlotCurveItem(range(10), [1] * 10, pen='w'))\n self.figure.setYRange(0, 17)\n self.figure.setMouseEnabled(y=False)\n self.figure.showGrid(x=True, y=True, alpha=0.4)\n self.label = pg.TextItem(anchor=(0, 1))\n self.figure.plotItem.addItem(self.label)\n\n @pyqtSlot(list, int, list)\n def do_sequence(self, sequence, timelength, steadystatenames):\n xdatalist = []\n ydatalist = []\n for achannelname, adds in self.ddslist.iteritems():\n channelpulses = [i for i in sequence if i[0] == achannelname]\n channelpulses.sort(key=lambda name: name[1]['ms'])\n starttimes = []\n endtimes = []\n 
frequencies = []\n amplitudes = []\n if achannelname in steadystatenames:\n starttimes.append(-50)\n endtimes.append(0)\n for apulse in channelpulses:\n starttimes.append(apulse[1]['ms'])\n endtimes.append((apulse[1] + apulse[2])['ms'])\n yhigh = 0.75 + adds.channelnumber\n ylow = 0.25 + adds.channelnumber\n if len(starttimes) < 0:\n xdata = [starttimes[0] + self.timeoffset]\n ydata = [yhigh]\n else:\n xdata = [self.timeoffset]\n ydata = [ylow]\n for i in range(len(starttimes)):\n xdata += [starttimes[i] + self.timeoffset] * 2 + [endtimes[\n i] + self.timeoffset] * 2\n if ydata[-1] == ylow:\n ydata += [ylow, yhigh, yhigh, ylow]\n else:\n ydata += [yhigh, ylow, ylow, yhigh]\n xdata.append(timelength)\n ydata.append(ylow)\n xdatalist.append(xdata)\n ydatalist.append(ydata)\n self.plot(xdatalist, ydatalist)\n\n def plot(self, xlist, ylist):\n self.figure.clear()\n self.figure.addItem(self.label)\n for i in range(len(xlist)):\n xdata = xlist[i]\n ydata = ylist[i]\n if len(xdata) > 1:\n self.figure.addItem(pg.PlotCurveItem(xdata, ydata, pen='w'))\n self.figure.addItem(pg.InfiniteLine(self.timeoffset, pen=pg.mkPen(\n 'r', style=Qt.DashLine)))\n",
"step-3": "<mask token>\nglobal harwareConfiguration\n\n\nclass graphingwidget(QtGui.QWidget):\n SIGNALID = 104692\n update_signal = pyqtSignal(list)\n\n def __init__(self, reactor, configpath):\n super(graphingwidget, self).__init__()\n self.reactor = reactor\n self.configpath = configpath\n self.initialize()\n self.timeoffset = 200\n\n def mouseMoved(self, evt):\n pos = evt\n if self.figure.sceneBoundingRect().contains(pos):\n mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)\n index = int(mousePoint.x())\n self.label.setPos(mousePoint)\n self.label.setText('{:d}'.format(int(mousePoint.x())))\n\n def initialize(self):\n sys.path.append(self.configpath)\n global hardwareConfiguration\n from hardwareConfiguration import hardwareConfiguration\n self.ddslist = hardwareConfiguration.ddsDict\n self.do_layout()\n self.figure.scene().sigMouseMoved.connect(self.mouseMoved)\n\n def do_layout(self):\n yaxis = pg.AxisItem(orientation='left')\n ticks = []\n sorteddict = sorted(self.ddslist.items(), key=lambda x: x[1].\n channelnumber)\n for i in range(0, 17):\n if i < len(sorteddict):\n string = sorteddict[i][0]\n else:\n string = ''\n ticks.append((i + 0.5, string))\n yaxis.setTicks([ticks])\n self.figure = pg.PlotWidget(axisItems={'left': yaxis})\n self.layoutVertical = QtGui.QVBoxLayout(self)\n self.layoutVertical.addWidget(self.figure)\n for adds, config in self.ddslist.iteritems():\n self.figure.addItem(pg.PlotCurveItem(range(10), [1] * 10, pen='w'))\n self.figure.setYRange(0, 17)\n self.figure.setMouseEnabled(y=False)\n self.figure.showGrid(x=True, y=True, alpha=0.4)\n self.label = pg.TextItem(anchor=(0, 1))\n self.figure.plotItem.addItem(self.label)\n\n @pyqtSlot(list, int, list)\n def do_sequence(self, sequence, timelength, steadystatenames):\n xdatalist = []\n ydatalist = []\n for achannelname, adds in self.ddslist.iteritems():\n channelpulses = [i for i in sequence if i[0] == achannelname]\n channelpulses.sort(key=lambda name: name[1]['ms'])\n starttimes = 
[]\n endtimes = []\n frequencies = []\n amplitudes = []\n if achannelname in steadystatenames:\n starttimes.append(-50)\n endtimes.append(0)\n for apulse in channelpulses:\n starttimes.append(apulse[1]['ms'])\n endtimes.append((apulse[1] + apulse[2])['ms'])\n yhigh = 0.75 + adds.channelnumber\n ylow = 0.25 + adds.channelnumber\n if len(starttimes) < 0:\n xdata = [starttimes[0] + self.timeoffset]\n ydata = [yhigh]\n else:\n xdata = [self.timeoffset]\n ydata = [ylow]\n for i in range(len(starttimes)):\n xdata += [starttimes[i] + self.timeoffset] * 2 + [endtimes[\n i] + self.timeoffset] * 2\n if ydata[-1] == ylow:\n ydata += [ylow, yhigh, yhigh, ylow]\n else:\n ydata += [yhigh, ylow, ylow, yhigh]\n xdata.append(timelength)\n ydata.append(ylow)\n xdatalist.append(xdata)\n ydatalist.append(ydata)\n self.plot(xdatalist, ydatalist)\n\n def plot(self, xlist, ylist):\n self.figure.clear()\n self.figure.addItem(self.label)\n for i in range(len(xlist)):\n xdata = xlist[i]\n ydata = ylist[i]\n if len(xdata) > 1:\n self.figure.addItem(pg.PlotCurveItem(xdata, ydata, pen='w'))\n self.figure.addItem(pg.InfiniteLine(self.timeoffset, pen=pg.mkPen(\n 'r', style=Qt.DashLine)))\n",
"step-4": "from PyQt4 import QtGui\nfrom PyQt4.QtCore import pyqtSignal, pyqtSlot, QObject, Qt\nfrom twisted.internet.defer import inlineCallbacks\nimport numpy as np\nfrom connection import connection\nimport pyqtgraph as pg\nfrom pyqtgraph.SignalProxy import SignalProxy\nimport sys\nimport time\nglobal harwareConfiguration\n\n\nclass graphingwidget(QtGui.QWidget):\n SIGNALID = 104692\n update_signal = pyqtSignal(list)\n\n def __init__(self, reactor, configpath):\n super(graphingwidget, self).__init__()\n self.reactor = reactor\n self.configpath = configpath\n self.initialize()\n self.timeoffset = 200\n\n def mouseMoved(self, evt):\n pos = evt\n if self.figure.sceneBoundingRect().contains(pos):\n mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)\n index = int(mousePoint.x())\n self.label.setPos(mousePoint)\n self.label.setText('{:d}'.format(int(mousePoint.x())))\n\n def initialize(self):\n sys.path.append(self.configpath)\n global hardwareConfiguration\n from hardwareConfiguration import hardwareConfiguration\n self.ddslist = hardwareConfiguration.ddsDict\n self.do_layout()\n self.figure.scene().sigMouseMoved.connect(self.mouseMoved)\n\n def do_layout(self):\n yaxis = pg.AxisItem(orientation='left')\n ticks = []\n sorteddict = sorted(self.ddslist.items(), key=lambda x: x[1].\n channelnumber)\n for i in range(0, 17):\n if i < len(sorteddict):\n string = sorteddict[i][0]\n else:\n string = ''\n ticks.append((i + 0.5, string))\n yaxis.setTicks([ticks])\n self.figure = pg.PlotWidget(axisItems={'left': yaxis})\n self.layoutVertical = QtGui.QVBoxLayout(self)\n self.layoutVertical.addWidget(self.figure)\n for adds, config in self.ddslist.iteritems():\n self.figure.addItem(pg.PlotCurveItem(range(10), [1] * 10, pen='w'))\n self.figure.setYRange(0, 17)\n self.figure.setMouseEnabled(y=False)\n self.figure.showGrid(x=True, y=True, alpha=0.4)\n self.label = pg.TextItem(anchor=(0, 1))\n self.figure.plotItem.addItem(self.label)\n\n @pyqtSlot(list, int, list)\n def 
do_sequence(self, sequence, timelength, steadystatenames):\n xdatalist = []\n ydatalist = []\n for achannelname, adds in self.ddslist.iteritems():\n channelpulses = [i for i in sequence if i[0] == achannelname]\n channelpulses.sort(key=lambda name: name[1]['ms'])\n starttimes = []\n endtimes = []\n frequencies = []\n amplitudes = []\n if achannelname in steadystatenames:\n starttimes.append(-50)\n endtimes.append(0)\n for apulse in channelpulses:\n starttimes.append(apulse[1]['ms'])\n endtimes.append((apulse[1] + apulse[2])['ms'])\n yhigh = 0.75 + adds.channelnumber\n ylow = 0.25 + adds.channelnumber\n if len(starttimes) < 0:\n xdata = [starttimes[0] + self.timeoffset]\n ydata = [yhigh]\n else:\n xdata = [self.timeoffset]\n ydata = [ylow]\n for i in range(len(starttimes)):\n xdata += [starttimes[i] + self.timeoffset] * 2 + [endtimes[\n i] + self.timeoffset] * 2\n if ydata[-1] == ylow:\n ydata += [ylow, yhigh, yhigh, ylow]\n else:\n ydata += [yhigh, ylow, ylow, yhigh]\n xdata.append(timelength)\n ydata.append(ylow)\n xdatalist.append(xdata)\n ydatalist.append(ydata)\n self.plot(xdatalist, ydatalist)\n\n def plot(self, xlist, ylist):\n self.figure.clear()\n self.figure.addItem(self.label)\n for i in range(len(xlist)):\n xdata = xlist[i]\n ydata = ylist[i]\n if len(xdata) > 1:\n self.figure.addItem(pg.PlotCurveItem(xdata, ydata, pen='w'))\n self.figure.addItem(pg.InfiniteLine(self.timeoffset, pen=pg.mkPen(\n 'r', style=Qt.DashLine)))\n",
"step-5": "from PyQt4 import QtGui\nfrom PyQt4.QtCore import pyqtSignal, pyqtSlot, QObject, Qt\nfrom twisted.internet.defer import inlineCallbacks\nimport numpy as np\nfrom connection import connection\nimport pyqtgraph as pg\nfrom pyqtgraph.SignalProxy import SignalProxy\nimport sys\nimport time\n\nglobal harwareConfiguration\n\n\nclass graphingwidget(QtGui.QWidget):\n\n SIGNALID = 104692\n update_signal = pyqtSignal(list)\n def __init__(self,reactor, configpath):\n super(graphingwidget,self).__init__()\n self.reactor = reactor\n self.configpath = configpath\n self.initialize()\n self.timeoffset = 200\n\n \n def mouseMoved(self,evt):\n pos = evt\n if self.figure.sceneBoundingRect().contains(pos):\n mousePoint = self.figure.plotItem.vb.mapSceneToView(pos)\n index = int(mousePoint.x())\n self.label.setPos(mousePoint)\n self.label.setText(\"{:d}\".format(int(mousePoint.x())))\n \n def initialize(self):\n sys.path.append(self.configpath)\n global hardwareConfiguration\n from hardwareConfiguration import hardwareConfiguration\n self.ddslist = hardwareConfiguration.ddsDict\n self.do_layout()\n self.figure.scene().sigMouseMoved.connect(self.mouseMoved)\n \n\n\n def do_layout(self):\n yaxis = pg.AxisItem(orientation='left')\n ticks = []\n sorteddict = sorted(self.ddslist.items(),key =lambda x: x[1].channelnumber)\n for i in range(0,17):\n if i < len(sorteddict):\n string = sorteddict[i][0]\n else:\n string = \"\"\n ticks.append((i+0.5,string))\n yaxis.setTicks([ticks])\n self.figure = pg.PlotWidget(axisItems ={'left':yaxis})\n self.layoutVertical = QtGui.QVBoxLayout(self)\n self.layoutVertical.addWidget(self.figure)\n \n for adds,config in self.ddslist.iteritems():\n self.figure.addItem(pg.PlotCurveItem(range(10),[1]*10,pen='w'))\n self.figure.setYRange(0,17)\n self.figure.setMouseEnabled(y=False)\n self.figure.showGrid(x=True,y=True,alpha=0.4)\n self.label = pg.TextItem(anchor=(0,1))\n self.figure.plotItem.addItem(self.label)\n\n @pyqtSlot(list,int,list) \n def 
do_sequence(self,sequence,timelength,steadystatenames):\n xdatalist = []\n ydatalist = []\n for achannelname, adds in self.ddslist.iteritems():\n channelpulses = [i for i in sequence if i[0] == achannelname]\n channelpulses.sort(key= lambda name: name[1]['ms'])\n starttimes = []\n endtimes = []\n frequencies = []\n amplitudes = []\n if achannelname in steadystatenames:\n starttimes.append(-50)\n endtimes.append(0)\n for apulse in channelpulses:\n starttimes.append(apulse[1]['ms'])\n endtimes.append((apulse[1]+ apulse[2])['ms'])\n yhigh = 0.75+adds.channelnumber\n ylow = 0.25+adds.channelnumber\n \n if len(starttimes) < 0:\n xdata = [starttimes[0]+self.timeoffset]\n ydata = [yhigh]\n else:\n xdata = [self.timeoffset]\n ydata = [ylow]\n for i in range(len(starttimes)):\n xdata += [starttimes[i]+self.timeoffset]*2 + [endtimes[i]+self.timeoffset]*2\n \n if ydata[-1] == ylow:\n ydata += [ylow,yhigh,yhigh,ylow]\n else:\n ydata += [yhigh,ylow,ylow,yhigh]\n xdata.append(timelength)\n ydata.append(ylow)\n xdatalist.append(xdata)\n ydatalist.append(ydata)\n self.plot(xdatalist,ydatalist)\n \n \n def plot(self,xlist,ylist):\n self.figure.clear()\n self.figure.addItem(self.label)\n for i in range(len(xlist)):\n xdata = xlist[i]\n ydata = ylist[i]\n if len(xdata)>1:\n self.figure.addItem(pg.PlotCurveItem(xdata,ydata,pen='w'))\n self.figure.addItem(pg.InfiniteLine(self.timeoffset,pen=pg.mkPen('r',style=Qt.DashLine)))\n\n ",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
class LanguageAdmin(admin.ModelAdmin):
<|reserved_special_token_0|>
list_display = 'language_name', 'description', 'environment'
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LessonAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = ('lesson_number', 'lesson_title', 'language',
'lesson_description')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LessonHintAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = 'hint_title', 'lesson', 'hint_description'
filter_horizontal = ()
list_filter = ()
fieldsets = ()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LanguageAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = 'language_name', 'description', 'environment'
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LessonAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = ('lesson_number', 'lesson_title', 'language',
'lesson_description')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LessonHintAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = 'hint_title', 'lesson', 'hint_description'
filter_horizontal = ()
list_filter = ()
fieldsets = ()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProgrammingEnvironmentAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = 'environment_name', 'description'
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LanguageAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = 'language_name', 'description', 'environment'
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LessonAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = ('lesson_number', 'lesson_title', 'language',
'lesson_description')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LessonHintAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = 'hint_title', 'lesson', 'hint_description'
filter_horizontal = ()
list_filter = ()
fieldsets = ()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from django.contrib import admin
from lesson.models import ProgrammingEnvironment, Language, Lesson, LessonHint
class ProgrammingEnvironmentAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = 'environment_name', 'description'
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LanguageAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = 'language_name', 'description', 'environment'
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LessonAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = ('lesson_number', 'lesson_title', 'language',
'lesson_description')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LessonHintAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = 'hint_title', 'lesson', 'hint_description'
filter_horizontal = ()
list_filter = ()
fieldsets = ()
admin.site.register(ProgrammingEnvironment, ProgrammingEnvironmentAdmin)
admin.site.register(Language, LanguageAdmin)
admin.site.register(Lesson, LessonAdmin)
admin.site.register(LessonHint, LessonHintAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
from lesson.models import ProgrammingEnvironment, Language, Lesson, LessonHint
# list_display - Show these fields for each model on the Admin site
# search_fields - Allow searching in these fields
# Register models for the Admin site
class ProgrammingEnvironmentAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = ('environment_name', 'description')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LanguageAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = ('language_name', 'description', 'environment')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LessonAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = ('lesson_number', 'lesson_title', 'language', 'lesson_description')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
class LessonHintAdmin(admin.ModelAdmin):
""" Model for the Admin page """
list_display = ('hint_title', 'lesson', 'hint_description')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
admin.site.register(ProgrammingEnvironment, ProgrammingEnvironmentAdmin)
admin.site.register(Language, LanguageAdmin)
admin.site.register(Lesson, LessonAdmin)
admin.site.register(LessonHint, LessonHintAdmin)
|
flexible
|
{
"blob_id": "2500c3562819e4e85ce3cbc30e0ddf1b8437e0a2",
"index": 6448,
"step-1": "<mask token>\n\n\nclass LanguageAdmin(admin.ModelAdmin):\n <mask token>\n list_display = 'language_name', 'description', 'environment'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('lesson_number', 'lesson_title', 'language',\n 'lesson_description')\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonHintAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'hint_title', 'lesson', 'hint_description'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LanguageAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'language_name', 'description', 'environment'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('lesson_number', 'lesson_title', 'language',\n 'lesson_description')\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonHintAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'hint_title', 'lesson', 'hint_description'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ProgrammingEnvironmentAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'environment_name', 'description'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LanguageAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'language_name', 'description', 'environment'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('lesson_number', 'lesson_title', 'language',\n 'lesson_description')\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonHintAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'hint_title', 'lesson', 'hint_description'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\n<mask token>\n",
"step-4": "from django.contrib import admin\nfrom lesson.models import ProgrammingEnvironment, Language, Lesson, LessonHint\n\n\nclass ProgrammingEnvironmentAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'environment_name', 'description'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LanguageAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'language_name', 'description', 'environment'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('lesson_number', 'lesson_title', 'language',\n 'lesson_description')\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nclass LessonHintAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = 'hint_title', 'lesson', 'hint_description'\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\n\nadmin.site.register(ProgrammingEnvironment, ProgrammingEnvironmentAdmin)\nadmin.site.register(Language, LanguageAdmin)\nadmin.site.register(Lesson, LessonAdmin)\nadmin.site.register(LessonHint, LessonHintAdmin)\n",
"step-5": "from django.contrib import admin\nfrom lesson.models import ProgrammingEnvironment, Language, Lesson, LessonHint\n\n# list_display - Show these fields for each model on the Admin site\n# search_fields - Allow searching in these fields\n\n# Register models for the Admin site\nclass ProgrammingEnvironmentAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('environment_name', 'description')\n\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\nclass LanguageAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('language_name', 'description', 'environment')\n\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\nclass LessonAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('lesson_number', 'lesson_title', 'language', 'lesson_description')\n\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\nclass LessonHintAdmin(admin.ModelAdmin):\n \"\"\" Model for the Admin page \"\"\"\n list_display = ('hint_title', 'lesson', 'hint_description')\n\n filter_horizontal = ()\n list_filter = ()\n fieldsets = ()\n\nadmin.site.register(ProgrammingEnvironment, ProgrammingEnvironmentAdmin)\nadmin.site.register(Language, LanguageAdmin)\nadmin.site.register(Lesson, LessonAdmin)\nadmin.site.register(LessonHint, LessonHintAdmin)",
"step-ids": [
8,
9,
12,
14,
15
]
}
|
[
8,
9,
12,
14,
15
] |
<|reserved_special_token_0|>
def getDirList(p):
p = p.replace('/', '\\')
if p[-1] != '\\':
p = p + '\\'
a = os.listdir(p)
for x in a:
if os.path.isfile(p + x):
a, b = os.path.splitext(p + x)
if 0 < b.find('bak'):
print(p + x)
os.remove(p + x)
elif os.path.isdir(p + x):
if 0 < (p + x).find('.svn'):
for p, d, f in os.walk(p + x):
if p.find('.svn') > 0:
print(p + x)
os.popen('rd /s /q %s' % p)
else:
getDirList(p + x)
def createFile(f):
if os.path.isfile(f):
a_file = io.open(f, encoding='utf-8')
print(a_file.readline())
else:
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getDirList(p):
p = p.replace('/', '\\')
if p[-1] != '\\':
p = p + '\\'
a = os.listdir(p)
for x in a:
if os.path.isfile(p + x):
a, b = os.path.splitext(p + x)
if 0 < b.find('bak'):
print(p + x)
os.remove(p + x)
elif os.path.isdir(p + x):
if 0 < (p + x).find('.svn'):
for p, d, f in os.walk(p + x):
if p.find('.svn') > 0:
print(p + x)
os.popen('rd /s /q %s' % p)
else:
getDirList(p + x)
def createFile(f):
if os.path.isfile(f):
a_file = io.open(f, encoding='utf-8')
print(a_file.readline())
else:
return
while 1 == 1:
print(getDirList('D:\\project'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'lidong'
def getDirList(p):
p = p.replace('/', '\\')
if p[-1] != '\\':
p = p + '\\'
a = os.listdir(p)
for x in a:
if os.path.isfile(p + x):
a, b = os.path.splitext(p + x)
if 0 < b.find('bak'):
print(p + x)
os.remove(p + x)
elif os.path.isdir(p + x):
if 0 < (p + x).find('.svn'):
for p, d, f in os.walk(p + x):
if p.find('.svn') > 0:
print(p + x)
os.popen('rd /s /q %s' % p)
else:
getDirList(p + x)
def createFile(f):
if os.path.isfile(f):
a_file = io.open(f, encoding='utf-8')
print(a_file.readline())
else:
return
while 1 == 1:
print(getDirList('D:\\project'))
<|reserved_special_token_1|>
import fileinput
import io
from locale import str
import os
__author__ = 'lidong'
def getDirList(p):
p = p.replace('/', '\\')
if p[-1] != '\\':
p = p + '\\'
a = os.listdir(p)
for x in a:
if os.path.isfile(p + x):
a, b = os.path.splitext(p + x)
if 0 < b.find('bak'):
print(p + x)
os.remove(p + x)
elif os.path.isdir(p + x):
if 0 < (p + x).find('.svn'):
for p, d, f in os.walk(p + x):
if p.find('.svn') > 0:
print(p + x)
os.popen('rd /s /q %s' % p)
else:
getDirList(p + x)
def createFile(f):
if os.path.isfile(f):
a_file = io.open(f, encoding='utf-8')
print(a_file.readline())
else:
return
while 1 == 1:
print(getDirList('D:\\project'))
<|reserved_special_token_1|>
# coding: UTF-8
import fileinput
import io
from locale import str
import os
__author__ = 'lidong'
def getDirList( p ):
p = p.replace( "/","\\")
if p[ -1] != "\\":
p = p+"\\"
a = os.listdir( p )
for x in a:
if(os.path.isfile( p + x )):
a, b = os.path.splitext( p + x )
if(0<b.find("bak")):
print (p + x)
os.remove( p + x)
elif(os.path.isdir( p + x )): #.svn
if(0<( p + x ).find(".svn")):
for (p,d,f) in os.walk( p + x):
if p.find('.svn')>0:
print (p + x)
os.popen('rd /s /q %s'%p)
else :
getDirList(p + x)
def createFile( f ):
if(os.path.isfile(f)):
a_file = io.open( f, encoding='utf-8')
print(a_file.readline())
else :
return
while 1==1:
print ( getDirList( "D:\project" ) )
|
flexible
|
{
"blob_id": "e553da92b1bb5dfaa0fb7c702f5be4f66201c75b",
"index": 8843,
"step-1": "<mask token>\n\n\ndef getDirList(p):\n p = p.replace('/', '\\\\')\n if p[-1] != '\\\\':\n p = p + '\\\\'\n a = os.listdir(p)\n for x in a:\n if os.path.isfile(p + x):\n a, b = os.path.splitext(p + x)\n if 0 < b.find('bak'):\n print(p + x)\n os.remove(p + x)\n elif os.path.isdir(p + x):\n if 0 < (p + x).find('.svn'):\n for p, d, f in os.walk(p + x):\n if p.find('.svn') > 0:\n print(p + x)\n os.popen('rd /s /q %s' % p)\n else:\n getDirList(p + x)\n\n\ndef createFile(f):\n if os.path.isfile(f):\n a_file = io.open(f, encoding='utf-8')\n print(a_file.readline())\n else:\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getDirList(p):\n p = p.replace('/', '\\\\')\n if p[-1] != '\\\\':\n p = p + '\\\\'\n a = os.listdir(p)\n for x in a:\n if os.path.isfile(p + x):\n a, b = os.path.splitext(p + x)\n if 0 < b.find('bak'):\n print(p + x)\n os.remove(p + x)\n elif os.path.isdir(p + x):\n if 0 < (p + x).find('.svn'):\n for p, d, f in os.walk(p + x):\n if p.find('.svn') > 0:\n print(p + x)\n os.popen('rd /s /q %s' % p)\n else:\n getDirList(p + x)\n\n\ndef createFile(f):\n if os.path.isfile(f):\n a_file = io.open(f, encoding='utf-8')\n print(a_file.readline())\n else:\n return\n\n\nwhile 1 == 1:\n print(getDirList('D:\\\\project'))\n",
"step-3": "<mask token>\n__author__ = 'lidong'\n\n\ndef getDirList(p):\n p = p.replace('/', '\\\\')\n if p[-1] != '\\\\':\n p = p + '\\\\'\n a = os.listdir(p)\n for x in a:\n if os.path.isfile(p + x):\n a, b = os.path.splitext(p + x)\n if 0 < b.find('bak'):\n print(p + x)\n os.remove(p + x)\n elif os.path.isdir(p + x):\n if 0 < (p + x).find('.svn'):\n for p, d, f in os.walk(p + x):\n if p.find('.svn') > 0:\n print(p + x)\n os.popen('rd /s /q %s' % p)\n else:\n getDirList(p + x)\n\n\ndef createFile(f):\n if os.path.isfile(f):\n a_file = io.open(f, encoding='utf-8')\n print(a_file.readline())\n else:\n return\n\n\nwhile 1 == 1:\n print(getDirList('D:\\\\project'))\n",
"step-4": "import fileinput\nimport io\nfrom locale import str\nimport os\n__author__ = 'lidong'\n\n\ndef getDirList(p):\n p = p.replace('/', '\\\\')\n if p[-1] != '\\\\':\n p = p + '\\\\'\n a = os.listdir(p)\n for x in a:\n if os.path.isfile(p + x):\n a, b = os.path.splitext(p + x)\n if 0 < b.find('bak'):\n print(p + x)\n os.remove(p + x)\n elif os.path.isdir(p + x):\n if 0 < (p + x).find('.svn'):\n for p, d, f in os.walk(p + x):\n if p.find('.svn') > 0:\n print(p + x)\n os.popen('rd /s /q %s' % p)\n else:\n getDirList(p + x)\n\n\ndef createFile(f):\n if os.path.isfile(f):\n a_file = io.open(f, encoding='utf-8')\n print(a_file.readline())\n else:\n return\n\n\nwhile 1 == 1:\n print(getDirList('D:\\\\project'))\n",
"step-5": "# coding: UTF-8\nimport fileinput\nimport io\nfrom locale import str\nimport os\n\n__author__ = 'lidong'\n\n\ndef getDirList( p ):\n p = p.replace( \"/\",\"\\\\\")\n if p[ -1] != \"\\\\\":\n p = p+\"\\\\\"\n a = os.listdir( p )\n for x in a:\n if(os.path.isfile( p + x )):\n a, b = os.path.splitext( p + x )\n if(0<b.find(\"bak\")):\n print (p + x)\n os.remove( p + x)\n elif(os.path.isdir( p + x )): #.svn\n if(0<( p + x ).find(\".svn\")):\n for (p,d,f) in os.walk( p + x):\n if p.find('.svn')>0:\n print (p + x)\n os.popen('rd /s /q %s'%p)\n else :\n getDirList(p + x)\n\ndef createFile( f ):\n if(os.path.isfile(f)):\n a_file = io.open( f, encoding='utf-8')\n print(a_file.readline())\n else :\n\n return\n\nwhile 1==1:\n print ( getDirList( \"D:\\project\" ) )\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
import redis
class Carteiro():
if os.environ.get("REDIS_URL") != None:
redis_pool = redis.ConnectionPool.from_url(os.environ.get("REDIS_URL"))
else:
redis_pool = ''
def __init__(self, id, pacote):
if os.environ.get("REDIS_URL") != None:
self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)
else:
self.redis_bd = redis.Redis()
self.user_id = str(id)
self.pacote = bytes(str(pacote), 'ascii')
self.user_dict = self.redis_bd.hgetall(self.user_id)
def guardar_status_encomenda(self, status):
if self.redis_bd.exists(self.user_id):
self.user_dict[self.pacote] = status
self.redis_bd.hmset(self.user_id, self.user_dict)
else:
novo_user_dict = {self.pacote: status}
self.redis_bd.hmset(self.user_id, novo_user_dict)
def ler_carta(self):
carta = self.user_dict.get(self.pacote)
carta = carta.decode(encoding='UTF-8')
return carta
def roubar_pacote(self):
if self.pacote in self.user_dict:
if len(self.user_dict) == 1:
self.redis_bd.delete(self.user_id)
else:
self.redis_bd.hdel(self.user_id, self.pacote)
del self.user_dict[self.pacote]
else:
raise ValueError('codigo nao existente na base de dados')
def checar_existencia_pacote(self):
return self.user_dict.get(self.pacote)
|
normal
|
{
"blob_id": "dd95d14f35b6a92b3363d99a616678da18733a61",
"index": 7839,
"step-1": "<mask token>\n\n\nclass Carteiro:\n if os.environ.get('REDIS_URL') != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))\n else:\n redis_pool = ''\n\n def __init__(self, id, pacote):\n if os.environ.get('REDIS_URL') != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n <mask token>\n <mask token>\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)\n",
"step-2": "<mask token>\n\n\nclass Carteiro:\n if os.environ.get('REDIS_URL') != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))\n else:\n redis_pool = ''\n\n def __init__(self, id, pacote):\n if os.environ.get('REDIS_URL') != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n\n def guardar_status_encomenda(self, status):\n if self.redis_bd.exists(self.user_id):\n self.user_dict[self.pacote] = status\n self.redis_bd.hmset(self.user_id, self.user_dict)\n else:\n novo_user_dict = {self.pacote: status}\n self.redis_bd.hmset(self.user_id, novo_user_dict)\n <mask token>\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)\n",
"step-3": "<mask token>\n\n\nclass Carteiro:\n if os.environ.get('REDIS_URL') != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))\n else:\n redis_pool = ''\n\n def __init__(self, id, pacote):\n if os.environ.get('REDIS_URL') != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n\n def guardar_status_encomenda(self, status):\n if self.redis_bd.exists(self.user_id):\n self.user_dict[self.pacote] = status\n self.redis_bd.hmset(self.user_id, self.user_dict)\n else:\n novo_user_dict = {self.pacote: status}\n self.redis_bd.hmset(self.user_id, novo_user_dict)\n\n def ler_carta(self):\n carta = self.user_dict.get(self.pacote)\n carta = carta.decode(encoding='UTF-8')\n return carta\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)\n",
"step-4": "import os\nimport redis\n\n\nclass Carteiro:\n if os.environ.get('REDIS_URL') != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))\n else:\n redis_pool = ''\n\n def __init__(self, id, pacote):\n if os.environ.get('REDIS_URL') != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n\n def guardar_status_encomenda(self, status):\n if self.redis_bd.exists(self.user_id):\n self.user_dict[self.pacote] = status\n self.redis_bd.hmset(self.user_id, self.user_dict)\n else:\n novo_user_dict = {self.pacote: status}\n self.redis_bd.hmset(self.user_id, novo_user_dict)\n\n def ler_carta(self):\n carta = self.user_dict.get(self.pacote)\n carta = carta.decode(encoding='UTF-8')\n return carta\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)\n",
"step-5": "import os\nimport redis\n\nclass Carteiro():\n\n if os.environ.get(\"REDIS_URL\") != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get(\"REDIS_URL\"))\n else:\n redis_pool = ''\n \n def __init__(self, id, pacote):\n if os.environ.get(\"REDIS_URL\") != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n\n def guardar_status_encomenda(self, status):\n \n if self.redis_bd.exists(self.user_id):\n self.user_dict[self.pacote] = status\n self.redis_bd.hmset(self.user_id, self.user_dict)\n else:\n novo_user_dict = {self.pacote: status}\n self.redis_bd.hmset(self.user_id, novo_user_dict)\n \n def ler_carta(self):\n carta = self.user_dict.get(self.pacote)\n carta = carta.decode(encoding='UTF-8')\n return carta\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from socket import *
from multiprocessing import Process
import sys
ADDR = ("127.0.0.1", 8888)
udp_socket = socket(AF_INET, SOCK_DGRAM)
# udp_socket.bind(("0.0.0.0",6955)) # udp套接字在一段时间不链接后,会自动重新分配端口,所以需要绑定
def login():
while True:
name = input("请输入昵称(不能重复)")
msg = "LOGIN" + "##" + name
udp_socket.sendto(msg.encode(), ADDR)
data, addr = udp_socket.recvfrom(1024)
if data.decode() == "0":
print("昵称已存在,请重新输入")
continue
else:
print("你已进入聊天室")
return name
def chat(name):
p = Process(target=receive, daemon=True)
p.start()
while True:
try:
content = input(">>>>")
except KeyboardInterrupt:
print("程序退出")
content = "" # 如果阻塞在input ctrl c 退出的话,调用my_exit函数
if not content:
my_exit(name)
msg = "CHAT" + "##" + f"{name}:" + content
udp_socket.sendto(msg.encode(), ADDR)
print("你发送了一条消息")
def my_exit(name):
msg = "EXIT" + "##" + name
print("您已退出聊天室")
udp_socket.sendto(msg.encode(), ADDR)
sys.exit()
def receive(): # 作为子进程,收到消息然后打印出收到的内容
while True:
data, addr = udp_socket.recvfrom(1024)
print("\n" + data.decode() + "\n>>>", end="")
def main():
name = login()
chat(name)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "fd6cf903490ff4352e4721282354a68437ecb1e0",
"index": 8314,
"step-1": "<mask token>\n\n\ndef login():\n while True:\n name = input('请输入昵称(不能重复)')\n msg = 'LOGIN' + '##' + name\n udp_socket.sendto(msg.encode(), ADDR)\n data, addr = udp_socket.recvfrom(1024)\n if data.decode() == '0':\n print('昵称已存在,请重新输入')\n continue\n else:\n print('你已进入聊天室')\n return name\n\n\ndef chat(name):\n p = Process(target=receive, daemon=True)\n p.start()\n while True:\n try:\n content = input('>>>>')\n except KeyboardInterrupt:\n print('程序退出')\n content = ''\n if not content:\n my_exit(name)\n msg = 'CHAT' + '##' + f'{name}:' + content\n udp_socket.sendto(msg.encode(), ADDR)\n print('你发送了一条消息')\n\n\n<mask token>\n\n\ndef receive():\n while True:\n data, addr = udp_socket.recvfrom(1024)\n print('\\n' + data.decode() + '\\n>>>', end='')\n\n\ndef main():\n name = login()\n chat(name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef login():\n while True:\n name = input('请输入昵称(不能重复)')\n msg = 'LOGIN' + '##' + name\n udp_socket.sendto(msg.encode(), ADDR)\n data, addr = udp_socket.recvfrom(1024)\n if data.decode() == '0':\n print('昵称已存在,请重新输入')\n continue\n else:\n print('你已进入聊天室')\n return name\n\n\ndef chat(name):\n p = Process(target=receive, daemon=True)\n p.start()\n while True:\n try:\n content = input('>>>>')\n except KeyboardInterrupt:\n print('程序退出')\n content = ''\n if not content:\n my_exit(name)\n msg = 'CHAT' + '##' + f'{name}:' + content\n udp_socket.sendto(msg.encode(), ADDR)\n print('你发送了一条消息')\n\n\ndef my_exit(name):\n msg = 'EXIT' + '##' + name\n print('您已退出聊天室')\n udp_socket.sendto(msg.encode(), ADDR)\n sys.exit()\n\n\ndef receive():\n while True:\n data, addr = udp_socket.recvfrom(1024)\n print('\\n' + data.decode() + '\\n>>>', end='')\n\n\ndef main():\n name = login()\n chat(name)\n\n\n<mask token>\n",
"step-3": "<mask token>\nADDR = '127.0.0.1', 8888\nudp_socket = socket(AF_INET, SOCK_DGRAM)\n\n\ndef login():\n while True:\n name = input('请输入昵称(不能重复)')\n msg = 'LOGIN' + '##' + name\n udp_socket.sendto(msg.encode(), ADDR)\n data, addr = udp_socket.recvfrom(1024)\n if data.decode() == '0':\n print('昵称已存在,请重新输入')\n continue\n else:\n print('你已进入聊天室')\n return name\n\n\ndef chat(name):\n p = Process(target=receive, daemon=True)\n p.start()\n while True:\n try:\n content = input('>>>>')\n except KeyboardInterrupt:\n print('程序退出')\n content = ''\n if not content:\n my_exit(name)\n msg = 'CHAT' + '##' + f'{name}:' + content\n udp_socket.sendto(msg.encode(), ADDR)\n print('你发送了一条消息')\n\n\ndef my_exit(name):\n msg = 'EXIT' + '##' + name\n print('您已退出聊天室')\n udp_socket.sendto(msg.encode(), ADDR)\n sys.exit()\n\n\ndef receive():\n while True:\n data, addr = udp_socket.recvfrom(1024)\n print('\\n' + data.decode() + '\\n>>>', end='')\n\n\ndef main():\n name = login()\n chat(name)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from socket import *\nfrom multiprocessing import Process\nimport sys\nADDR = '127.0.0.1', 8888\nudp_socket = socket(AF_INET, SOCK_DGRAM)\n\n\ndef login():\n while True:\n name = input('请输入昵称(不能重复)')\n msg = 'LOGIN' + '##' + name\n udp_socket.sendto(msg.encode(), ADDR)\n data, addr = udp_socket.recvfrom(1024)\n if data.decode() == '0':\n print('昵称已存在,请重新输入')\n continue\n else:\n print('你已进入聊天室')\n return name\n\n\ndef chat(name):\n p = Process(target=receive, daemon=True)\n p.start()\n while True:\n try:\n content = input('>>>>')\n except KeyboardInterrupt:\n print('程序退出')\n content = ''\n if not content:\n my_exit(name)\n msg = 'CHAT' + '##' + f'{name}:' + content\n udp_socket.sendto(msg.encode(), ADDR)\n print('你发送了一条消息')\n\n\ndef my_exit(name):\n msg = 'EXIT' + '##' + name\n print('您已退出聊天室')\n udp_socket.sendto(msg.encode(), ADDR)\n sys.exit()\n\n\ndef receive():\n while True:\n data, addr = udp_socket.recvfrom(1024)\n print('\\n' + data.decode() + '\\n>>>', end='')\n\n\ndef main():\n name = login()\n chat(name)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from socket import *\nfrom multiprocessing import Process\nimport sys\n\nADDR = (\"127.0.0.1\", 8888)\nudp_socket = socket(AF_INET, SOCK_DGRAM)\n# udp_socket.bind((\"0.0.0.0\",6955)) # udp套接字在一段时间不链接后,会自动重新分配端口,所以需要绑定\n\n\ndef login():\n while True:\n name = input(\"请输入昵称(不能重复)\")\n msg = \"LOGIN\" + \"##\" + name\n udp_socket.sendto(msg.encode(), ADDR)\n data, addr = udp_socket.recvfrom(1024)\n if data.decode() == \"0\":\n print(\"昵称已存在,请重新输入\")\n continue\n else:\n print(\"你已进入聊天室\")\n return name\n\n\ndef chat(name):\n p = Process(target=receive, daemon=True)\n p.start()\n while True:\n try:\n content = input(\">>>>\")\n except KeyboardInterrupt:\n print(\"程序退出\")\n content = \"\" # 如果阻塞在input ctrl c 退出的话,调用my_exit函数\n if not content:\n my_exit(name)\n msg = \"CHAT\" + \"##\" + f\"{name}:\" + content\n udp_socket.sendto(msg.encode(), ADDR)\n print(\"你发送了一条消息\")\n\n\ndef my_exit(name):\n msg = \"EXIT\" + \"##\" + name\n print(\"您已退出聊天室\")\n udp_socket.sendto(msg.encode(), ADDR)\n sys.exit()\n\n\ndef receive(): # 作为子进程,收到消息然后打印出收到的内容\n while True:\n data, addr = udp_socket.recvfrom(1024)\n print(\"\\n\" + data.decode() + \"\\n>>>\", end=\"\")\n\n\ndef main():\n name = login()\n chat(name)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
class Config:
DEBUG = False
TESTING = False
# mysql+pymysql://user:password@host:port/database
# SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://gjp:976431@49.235.194.73:3306/test'
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root@127.0.0.1:3306/mydb'
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = 'hdfjds38948938bmbfsd90008'
class DevelopmentConfig(Config):
DEBUG = True
ENV = 'development'
class ProductionConfig(Config):
DATABASE_URI = ''
class TestingConfig(Config):
TESTING = True
|
normal
|
{
"blob_id": "d89f0ef24d8e8d23a77cbbb0ae8723c7dec8c00a",
"index": 4954,
"step-1": "<mask token>\n\n\nclass DevelopmentConfig(Config):\n <mask token>\n <mask token>\n\n\nclass ProductionConfig(Config):\n DATABASE_URI = ''\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-2": "<mask token>\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n ENV = 'development'\n\n\nclass ProductionConfig(Config):\n DATABASE_URI = ''\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-3": "class Config:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n ENV = 'development'\n\n\nclass ProductionConfig(Config):\n DATABASE_URI = ''\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-4": "class Config:\n DEBUG = False\n TESTING = False\n SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root@127.0.0.1:3306/mydb'\n SQLALCHEMY_TRACK_MODIFICATIONS = True\n SECRET_KEY = 'hdfjds38948938bmbfsd90008'\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n ENV = 'development'\n\n\nclass ProductionConfig(Config):\n DATABASE_URI = ''\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-5": "class Config:\n DEBUG = False\n TESTING = False\n # mysql+pymysql://user:password@host:port/database\n # SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://gjp:976431@49.235.194.73:3306/test'\n SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:root@127.0.0.1:3306/mydb'\n SQLALCHEMY_TRACK_MODIFICATIONS = True\n SECRET_KEY = 'hdfjds38948938bmbfsd90008'\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n ENV = 'development'\n\n\nclass ProductionConfig(Config):\n DATABASE_URI = ''\n\n\nclass TestingConfig(Config):\n TESTING = True\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def iou_score(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)
return iou_score
def dice_score(actual, predicted):
actual = np.asarray(actual).astype(np.bool)
predicted = np.asarray(predicted).astype(np.bool)
im_sum = actual.sum() + predicted.sum()
if im_sum == 0:
return 1
intersection = np.logical_and(actual, predicted)
return 2.0 * intersection.sum() / im_sum
<|reserved_special_token_0|>
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,
hide_threshold=None, text_file=None):
"""
pretty print for confusion matrixes
https://gist.github.com/zachguo/10296432
"""
if text_file is None:
print('\n', end=' ')
else:
print('\n', end=' ', file=open(text_file, 'a'))
columnwidth = max([len(x) for x in labels] + [5])
empty_cell = ' ' * columnwidth
fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3
) // 2 * ' '
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)
) + fst_empty_cell
if text_file is None:
print(' ' + fst_empty_cell, end=' ')
else:
print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))
for label in labels:
if text_file is None:
print('%{0}s'.format(columnwidth) % label, end=' ')
else:
print('%{0}s'.format(columnwidth) % label, end=' ', file=open(
text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
for i, label1 in enumerate(labels):
if text_file is None:
print(' %{0}s'.format(columnwidth) % label1, end=' ')
else:
print(' %{0}s'.format(columnwidth) % label1, end=' ', file=
open(text_file, 'a'))
for j in range(len(labels)):
cell = '%{}d'.format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
if text_file is None:
print(cell, end=' ')
else:
print(cell, end=' ', file=open(text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def iou_score(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)
return iou_score
def dice_score(actual, predicted):
    """Dice coefficient between two binary masks.

    Returns 1 when both masks are empty (the conventional value for that
    degenerate case).
    """
    # np.bool was removed in NumPy 1.24 — use the builtin bool instead.
    actual = np.asarray(actual).astype(bool)
    predicted = np.asarray(predicted).astype(bool)
    im_sum = actual.sum() + predicted.sum()
    if im_sum == 0:
        return 1
    intersection = np.logical_and(actual, predicted)
    return 2.0 * intersection.sum() / im_sum
<|reserved_special_token_0|>
def fast_auc(actual, predicted):
r = rankdata(predicted)
n_pos = np.sum(actual)
n_neg = len(actual) - n_pos
return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,
hide_threshold=None, text_file=None):
"""
pretty print for confusion matrixes
https://gist.github.com/zachguo/10296432
"""
if text_file is None:
print('\n', end=' ')
else:
print('\n', end=' ', file=open(text_file, 'a'))
columnwidth = max([len(x) for x in labels] + [5])
empty_cell = ' ' * columnwidth
fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3
) // 2 * ' '
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)
) + fst_empty_cell
if text_file is None:
print(' ' + fst_empty_cell, end=' ')
else:
print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))
for label in labels:
if text_file is None:
print('%{0}s'.format(columnwidth) % label, end=' ')
else:
print('%{0}s'.format(columnwidth) % label, end=' ', file=open(
text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
for i, label1 in enumerate(labels):
if text_file is None:
print(' %{0}s'.format(columnwidth) % label1, end=' ')
else:
print(' %{0}s'.format(columnwidth) % label1, end=' ', file=
open(text_file, 'a'))
for j in range(len(labels)):
cell = '%{}d'.format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
if text_file is None:
print(cell, end=' ')
else:
print(cell, end=' ', file=open(text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
def evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=
None, class_names=None):
classes, _ = np.unique(y_true, return_counts=True)
if class_names is None:
class_names = [str(n) for n in classes]
f1 = f1_score(y_true, y_pred, average='micro')
mcc = matthews_corrcoef(y_true, y_pred)
if len(classes) == 2:
mean_auc = roc_auc_score(y_true, y_proba[:, 1])
else:
mean_auc = roc_auc_score(y_true, y_proba, average='weighted',
multi_class='ovo')
if print_conf:
if text_file is not None:
print('\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *
mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(
text_file, 'a'))
cm = confusion_matrix(y_true, y_pred, labels=classes)
print_cm(cm, class_names, text_file=text_file)
return mean_auc, mcc, f1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def iou_score(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)
return iou_score
def dice_score(actual, predicted):
    """Dice coefficient between two binary masks.

    Returns 1 when both masks are empty (the conventional value for that
    degenerate case).
    """
    # np.bool was removed in NumPy 1.24 — use the builtin bool instead.
    actual = np.asarray(actual).astype(bool)
    predicted = np.asarray(predicted).astype(bool)
    im_sum = actual.sum() + predicted.sum()
    if im_sum == 0:
        return 1
    intersection = np.logical_and(actual, predicted)
    return 2.0 * intersection.sum() / im_sum
def accuracy_score(actual, predicted):
    """Fraction of all elements that are True in both masks (TP / N).

    NOTE(review): despite the name this is not standard accuracy
    ((TP + TN) / N) — only positions where both masks are True are
    credited. Confirm that callers expect this.
    """
    # np.bool was removed in NumPy 1.24 — use the builtin bool instead.
    actual = np.asarray(actual).astype(bool)
    predicted = np.asarray(predicted).astype(bool)
    num_els = actual.size
    intersection = np.logical_and(actual, predicted)
    return float(intersection.sum()) / num_els
def fast_auc(actual, predicted):
r = rankdata(predicted)
n_pos = np.sum(actual)
n_neg = len(actual) - n_pos
return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,
hide_threshold=None, text_file=None):
"""
pretty print for confusion matrixes
https://gist.github.com/zachguo/10296432
"""
if text_file is None:
print('\n', end=' ')
else:
print('\n', end=' ', file=open(text_file, 'a'))
columnwidth = max([len(x) for x in labels] + [5])
empty_cell = ' ' * columnwidth
fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3
) // 2 * ' '
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)
) + fst_empty_cell
if text_file is None:
print(' ' + fst_empty_cell, end=' ')
else:
print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))
for label in labels:
if text_file is None:
print('%{0}s'.format(columnwidth) % label, end=' ')
else:
print('%{0}s'.format(columnwidth) % label, end=' ', file=open(
text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
for i, label1 in enumerate(labels):
if text_file is None:
print(' %{0}s'.format(columnwidth) % label1, end=' ')
else:
print(' %{0}s'.format(columnwidth) % label1, end=' ', file=
open(text_file, 'a'))
for j in range(len(labels)):
cell = '%{}d'.format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
if text_file is None:
print(cell, end=' ')
else:
print(cell, end=' ', file=open(text_file, 'a'))
if text_file is None:
print()
else:
print(' ', file=open(text_file, 'a'))
def evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=
None, class_names=None):
classes, _ = np.unique(y_true, return_counts=True)
if class_names is None:
class_names = [str(n) for n in classes]
f1 = f1_score(y_true, y_pred, average='micro')
mcc = matthews_corrcoef(y_true, y_pred)
if len(classes) == 2:
mean_auc = roc_auc_score(y_true, y_proba[:, 1])
else:
mean_auc = roc_auc_score(y_true, y_proba, average='weighted',
multi_class='ovo')
if print_conf:
if text_file is not None:
print('\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *
mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(
text_file, 'a'))
cm = confusion_matrix(y_true, y_pred, labels=classes)
print_cm(cm, class_names, text_file=text_file)
return mean_auc, mcc, f1
<|reserved_special_token_1|>
from sklearn.metrics import roc_auc_score, matthews_corrcoef, f1_score, confusion_matrix
import numpy as np
from scipy.stats import rankdata
def iou_score(target, prediction):
    """Intersection-over-Union (Jaccard index) of two binary masks.

    A small epsilon in the denominator keeps the result finite when both
    masks are empty.
    """
    overlap = np.sum(np.logical_and(target, prediction))
    combined = np.sum(np.logical_or(target, prediction))
    return overlap / (combined + 1e-06)
def dice_score(actual, predicted):
    """Dice coefficient between two binary masks.

    Returns 1 when both masks are empty (the conventional value for that
    degenerate case).
    """
    # np.bool was removed in NumPy 1.24 — use the builtin bool instead.
    actual = np.asarray(actual).astype(bool)
    predicted = np.asarray(predicted).astype(bool)
    im_sum = actual.sum() + predicted.sum()
    if im_sum == 0:
        return 1
    intersection = np.logical_and(actual, predicted)
    return 2.0 * intersection.sum() / im_sum
def accuracy_score(actual, predicted):
    """Fraction of all elements that are True in both masks (TP / N).

    NOTE(review): despite the name this is not standard accuracy
    ((TP + TN) / N) — only positions where both masks are True are
    credited. Confirm that callers expect this.
    """
    # np.bool was removed in NumPy 1.24 — use the builtin bool instead.
    actual = np.asarray(actual).astype(bool)
    predicted = np.asarray(predicted).astype(bool)
    num_els = actual.size
    intersection = np.logical_and(actual, predicted)
    return float(intersection.sum()) / num_els
def fast_auc(actual, predicted):
    """Binary ROC-AUC via the rank-sum (Mann-Whitney U) formula.

    Expects ``actual`` as a 0/1 array and ``predicted`` as scores; ties in
    the scores receive average ranks.
    """
    ranks = rankdata(predicted)
    n_pos = np.sum(actual)
    n_neg = len(actual) - n_pos
    pos_rank_sum = np.sum(ranks[actual == 1])
    return (pos_rank_sum - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,
    hide_threshold=None, text_file=None):
    """
    pretty print for confusion matrixes
    https://gist.github.com/zachguo/10296432

    When ``text_file`` is given the file is opened once in append mode and
    closed on exit; the original opened a fresh, never-closed handle for
    every single print call (a file-descriptor leak).
    """
    fh = None if text_file is None else open(text_file, 'a')
    try:
        def _emit(text, end=' '):
            # Route a single chunk to stdout or to the one shared handle.
            if fh is None:
                print(text, end=end)
            else:
                print(text, end=end, file=fh)

        def _endline():
            # Original terminated rows with print() on stdout but with
            # print(' ') on files — preserve both byte-for-byte.
            _emit('' if fh is None else ' ', end='\n')

        _emit('\n')
        columnwidth = max([len(x) for x in labels] + [5])  # 5 = minimum cell width
        empty_cell = ' ' * columnwidth
        fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3
            ) // 2 * ' '
        if len(fst_empty_cell) < len(empty_cell):
            fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)
                ) + fst_empty_cell
        # Header row: corner cell then one column label per class.
        _emit('    ' + fst_empty_cell)
        for label in labels:
            _emit('%{0}s'.format(columnwidth) % label)
        _endline()
        # One row per true label, optionally masking cells.
        for i, label1 in enumerate(labels):
            _emit('    %{0}s'.format(columnwidth) % label1)
            for j in range(len(labels)):
                cell = '%{}d'.format(columnwidth) % cm[i, j]
                if hide_zeroes:
                    cell = cell if float(cm[i, j]) != 0 else empty_cell
                if hide_diagonal:
                    cell = cell if i != j else empty_cell
                if hide_threshold:
                    cell = cell if cm[i, j] > hide_threshold else empty_cell
                _emit(cell)
            _endline()
    finally:
        if fh is not None:
            fh.close()
def evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=
    None, class_names=None):
    """
    Score a multi-class classifier.

    Returns (mean_auc, mcc, f1): weighted one-vs-one ROC-AUC (plain binary
    AUC from the positive-class column when there are exactly two classes),
    Matthews correlation coefficient, and micro-averaged F1. When
    ``print_conf`` is set, the scores (file only) and a pretty confusion
    matrix are emitted via ``print_cm``.
    """
    classes, _ = np.unique(y_true, return_counts=True)
    if class_names is None:
        class_names = [str(c) for c in classes]
    f1 = f1_score(y_true, y_pred, average='micro')
    mcc = matthews_corrcoef(y_true, y_pred)
    binary = len(classes) == 2
    if binary:
        mean_auc = roc_auc_score(y_true, y_proba[:, 1])
    else:
        mean_auc = roc_auc_score(y_true, y_proba, average='weighted',
            multi_class='ovo')
    if print_conf:
        if text_file is not None:
            summary = '\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(
                100 * mcc, 100 * f1, 100 * mean_auc)
            print(summary, end=' ', file=open(text_file, 'a'))
        cm = confusion_matrix(y_true, y_pred, labels=classes)
        print_cm(cm, class_names, text_file=text_file)
    return mean_auc, mcc, f1
<|reserved_special_token_1|>
from sklearn.metrics import roc_auc_score, matthews_corrcoef, f1_score, confusion_matrix
import numpy as np
from scipy.stats import rankdata
def iou_score(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / (np.sum(union) + 1e-6)
return iou_score
def dice_score(actual, predicted):
    """Dice coefficient between two binary masks.

    Returns 1 when both masks are empty (the conventional value for that
    degenerate case).
    """
    # np.bool was removed in NumPy 1.24 — use the builtin bool instead.
    actual = np.asarray(actual).astype(bool)
    predicted = np.asarray(predicted).astype(bool)
    im_sum = actual.sum() + predicted.sum()
    if im_sum == 0: return 1
    intersection = np.logical_and(actual, predicted)
    return 2. * intersection.sum() / im_sum
def accuracy_score(actual, predicted):
    """Fraction of all elements that are True in both masks (TP / N).

    NOTE(review): despite the name this is not standard accuracy
    ((TP + TN) / N) — only positions where both masks are True are
    credited. Confirm that callers expect this.
    """
    # np.bool was removed in NumPy 1.24 — use the builtin bool instead.
    actual = np.asarray(actual).astype(bool)
    predicted = np.asarray(predicted).astype(bool)
    num_els = actual.size
    intersection = np.logical_and(actual, predicted)
    return float(intersection.sum()) / num_els
def fast_auc(actual, predicted):
r = rankdata(predicted)
n_pos = np.sum(actual)
n_neg = len(actual) - n_pos
return (np.sum(r[actual==1]) - n_pos*(n_pos+1)/2) / (n_pos*n_neg)
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None, text_file=None):
"""
pretty print for confusion matrixes
https://gist.github.com/zachguo/10296432
"""
if text_file is None: print("\n", end=" ")
else: print("\n", end=" ", file=open(text_file, "a"))
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
fst_empty_cell = (columnwidth - 3) // 2 * " " + "t/p" + (columnwidth - 3) // 2 * " "
if len(fst_empty_cell) < len(empty_cell):
fst_empty_cell = " " * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell
# Print header
if text_file is None: print(" " + fst_empty_cell, end=" ")
else: print(" " + fst_empty_cell, end=" ", file = open(text_file, "a"))
for label in labels:
if text_file is None: print("%{0}s".format(columnwidth) % label, end=" ")
else: print("%{0}s".format(columnwidth) % label, end=" ", file = open(text_file, "a"))
if text_file is None: print()
else: print(' ', file = open(text_file, "a"))
# Print rows
for i, label1 in enumerate(labels):
if text_file is None: print(" %{0}s".format(columnwidth) % label1, end=" ")
else: print(" %{0}s".format(columnwidth) % label1, end=" ", file = open(text_file, "a"))
for j in range(len(labels)):
cell = "%{}d".format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
if text_file is None: print(cell, end=" ")
else: print(cell, end=" ", file = open(text_file, "a"))
if text_file is None: print()
else: print(' ', file = open(text_file, "a"))
def evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=None, class_names=None):
classes, _ = np.unique(y_true, return_counts=True)
if class_names is None:
class_names = [str(n) for n in classes]
f1 = f1_score(y_true, y_pred, average='micro')
mcc = matthews_corrcoef(y_true, y_pred)
if len(classes)==2:
mean_auc = roc_auc_score(y_true, y_proba[:,1])
else:
mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovo')
# mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovr')
# ovo should be better, but average is not clear from docs
# mean_auc = roc_auc_score(y_true, y_proba, average='macro', multi_class='ovo')
if print_conf:
if text_file is not None:
print("\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}".format(100*mcc, 100*f1, 100*mean_auc), end=" ", file=open(text_file, "a"))
cm = confusion_matrix(y_true, y_pred, labels=classes)
print_cm(cm, class_names, text_file=text_file)
return mean_auc, mcc, f1
|
flexible
|
{
"blob_id": "c599a75788e3548c52ebb3b29e7a2398ff1b28a2",
"index": 1808,
"step-1": "<mask token>\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)\n return iou_score\n\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0:\n return 1\n intersection = np.logical_and(actual, predicted)\n return 2.0 * intersection.sum() / im_sum\n\n\n<mask token>\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,\n hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None:\n print('\\n', end=' ')\n else:\n print('\\n', end=' ', file=open(text_file, 'a'))\n columnwidth = max([len(x) for x in labels] + [5])\n empty_cell = ' ' * columnwidth\n fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3\n ) // 2 * ' '\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)\n ) + fst_empty_cell\n if text_file is None:\n print(' ' + fst_empty_cell, end=' ')\n else:\n print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))\n for label in labels:\n if text_file is None:\n print('%{0}s'.format(columnwidth) % label, end=' ')\n else:\n print('%{0}s'.format(columnwidth) % label, end=' ', file=open(\n text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n for i, label1 in enumerate(labels):\n if text_file is None:\n print(' %{0}s'.format(columnwidth) % label1, end=' ')\n else:\n print(' %{0}s'.format(columnwidth) % label1, end=' ', file=\n open(text_file, 'a'))\n for j in range(len(labels)):\n cell = '%{}d'.format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j 
else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None:\n print(cell, end=' ')\n else:\n print(cell, end=' ', file=open(text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)\n return iou_score\n\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0:\n return 1\n intersection = np.logical_and(actual, predicted)\n return 2.0 * intersection.sum() / im_sum\n\n\n<mask token>\n\n\ndef fast_auc(actual, predicted):\n r = rankdata(predicted)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,\n hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None:\n print('\\n', end=' ')\n else:\n print('\\n', end=' ', file=open(text_file, 'a'))\n columnwidth = max([len(x) for x in labels] + [5])\n empty_cell = ' ' * columnwidth\n fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3\n ) // 2 * ' '\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)\n ) + fst_empty_cell\n if text_file is None:\n print(' ' + fst_empty_cell, end=' ')\n else:\n print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))\n for label in labels:\n if text_file is None:\n print('%{0}s'.format(columnwidth) % label, end=' ')\n else:\n print('%{0}s'.format(columnwidth) % label, end=' ', file=open(\n text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n for i, label1 in enumerate(labels):\n if text_file is None:\n print(' %{0}s'.format(columnwidth) % label1, end=' ')\n else:\n print(' %{0}s'.format(columnwidth) % label1, end=' ', file=\n open(text_file, 
'a'))\n for j in range(len(labels)):\n cell = '%{}d'.format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None:\n print(cell, end=' ')\n else:\n print(cell, end=' ', file=open(text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n\n\ndef evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=\n None, class_names=None):\n classes, _ = np.unique(y_true, return_counts=True)\n if class_names is None:\n class_names = [str(n) for n in classes]\n f1 = f1_score(y_true, y_pred, average='micro')\n mcc = matthews_corrcoef(y_true, y_pred)\n if len(classes) == 2:\n mean_auc = roc_auc_score(y_true, y_proba[:, 1])\n else:\n mean_auc = roc_auc_score(y_true, y_proba, average='weighted',\n multi_class='ovo')\n if print_conf:\n if text_file is not None:\n print('\\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *\n mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(\n text_file, 'a'))\n cm = confusion_matrix(y_true, y_pred, labels=classes)\n print_cm(cm, class_names, text_file=text_file)\n return mean_auc, mcc, f1\n",
"step-3": "<mask token>\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)\n return iou_score\n\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0:\n return 1\n intersection = np.logical_and(actual, predicted)\n return 2.0 * intersection.sum() / im_sum\n\n\ndef accuracy_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n num_els = actual.size\n intersection = np.logical_and(actual, predicted)\n return float(intersection.sum()) / num_els\n\n\ndef fast_auc(actual, predicted):\n r = rankdata(predicted)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,\n hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None:\n print('\\n', end=' ')\n else:\n print('\\n', end=' ', file=open(text_file, 'a'))\n columnwidth = max([len(x) for x in labels] + [5])\n empty_cell = ' ' * columnwidth\n fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3\n ) // 2 * ' '\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)\n ) + fst_empty_cell\n if text_file is None:\n print(' ' + fst_empty_cell, end=' ')\n else:\n print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))\n for label in labels:\n if text_file is None:\n print('%{0}s'.format(columnwidth) % label, end=' ')\n else:\n print('%{0}s'.format(columnwidth) % label, end=' ', file=open(\n text_file, 'a'))\n if text_file is None:\n print()\n else:\n 
print(' ', file=open(text_file, 'a'))\n for i, label1 in enumerate(labels):\n if text_file is None:\n print(' %{0}s'.format(columnwidth) % label1, end=' ')\n else:\n print(' %{0}s'.format(columnwidth) % label1, end=' ', file=\n open(text_file, 'a'))\n for j in range(len(labels)):\n cell = '%{}d'.format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None:\n print(cell, end=' ')\n else:\n print(cell, end=' ', file=open(text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n\n\ndef evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=\n None, class_names=None):\n classes, _ = np.unique(y_true, return_counts=True)\n if class_names is None:\n class_names = [str(n) for n in classes]\n f1 = f1_score(y_true, y_pred, average='micro')\n mcc = matthews_corrcoef(y_true, y_pred)\n if len(classes) == 2:\n mean_auc = roc_auc_score(y_true, y_proba[:, 1])\n else:\n mean_auc = roc_auc_score(y_true, y_proba, average='weighted',\n multi_class='ovo')\n if print_conf:\n if text_file is not None:\n print('\\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *\n mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(\n text_file, 'a'))\n cm = confusion_matrix(y_true, y_pred, labels=classes)\n print_cm(cm, class_names, text_file=text_file)\n return mean_auc, mcc, f1\n",
"step-4": "from sklearn.metrics import roc_auc_score, matthews_corrcoef, f1_score, confusion_matrix\nimport numpy as np\nfrom scipy.stats import rankdata\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-06)\n return iou_score\n\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0:\n return 1\n intersection = np.logical_and(actual, predicted)\n return 2.0 * intersection.sum() / im_sum\n\n\ndef accuracy_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n num_els = actual.size\n intersection = np.logical_and(actual, predicted)\n return float(intersection.sum()) / num_els\n\n\ndef fast_auc(actual, predicted):\n r = rankdata(predicted)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(r[actual == 1]) - n_pos * (n_pos + 1) / 2) / (n_pos * n_neg)\n\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False,\n hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None:\n print('\\n', end=' ')\n else:\n print('\\n', end=' ', file=open(text_file, 'a'))\n columnwidth = max([len(x) for x in labels] + [5])\n empty_cell = ' ' * columnwidth\n fst_empty_cell = (columnwidth - 3) // 2 * ' ' + 't/p' + (columnwidth - 3\n ) // 2 * ' '\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = ' ' * (len(empty_cell) - len(fst_empty_cell)\n ) + fst_empty_cell\n if text_file is None:\n print(' ' + fst_empty_cell, end=' ')\n else:\n print(' ' + fst_empty_cell, end=' ', file=open(text_file, 'a'))\n for label in labels:\n if text_file is None:\n print('%{0}s'.format(columnwidth) % label, end=' ')\n else:\n 
print('%{0}s'.format(columnwidth) % label, end=' ', file=open(\n text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n for i, label1 in enumerate(labels):\n if text_file is None:\n print(' %{0}s'.format(columnwidth) % label1, end=' ')\n else:\n print(' %{0}s'.format(columnwidth) % label1, end=' ', file=\n open(text_file, 'a'))\n for j in range(len(labels)):\n cell = '%{}d'.format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None:\n print(cell, end=' ')\n else:\n print(cell, end=' ', file=open(text_file, 'a'))\n if text_file is None:\n print()\n else:\n print(' ', file=open(text_file, 'a'))\n\n\ndef evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=\n None, class_names=None):\n classes, _ = np.unique(y_true, return_counts=True)\n if class_names is None:\n class_names = [str(n) for n in classes]\n f1 = f1_score(y_true, y_pred, average='micro')\n mcc = matthews_corrcoef(y_true, y_pred)\n if len(classes) == 2:\n mean_auc = roc_auc_score(y_true, y_proba[:, 1])\n else:\n mean_auc = roc_auc_score(y_true, y_proba, average='weighted',\n multi_class='ovo')\n if print_conf:\n if text_file is not None:\n print('\\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}'.format(100 *\n mcc, 100 * f1, 100 * mean_auc), end=' ', file=open(\n text_file, 'a'))\n cm = confusion_matrix(y_true, y_pred, labels=classes)\n print_cm(cm, class_names, text_file=text_file)\n return mean_auc, mcc, f1\n",
"step-5": "from sklearn.metrics import roc_auc_score, matthews_corrcoef, f1_score, confusion_matrix\nimport numpy as np\nfrom scipy.stats import rankdata\n\n\ndef iou_score(target, prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / (np.sum(union) + 1e-6)\n return iou_score\n\ndef dice_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n im_sum = actual.sum() + predicted.sum()\n if im_sum == 0: return 1\n intersection = np.logical_and(actual, predicted)\n return 2. * intersection.sum() / im_sum\n\ndef accuracy_score(actual, predicted):\n actual = np.asarray(actual).astype(np.bool)\n predicted = np.asarray(predicted).astype(np.bool)\n num_els = actual.size\n intersection = np.logical_and(actual, predicted)\n return float(intersection.sum()) / num_els\n\ndef fast_auc(actual, predicted):\n r = rankdata(predicted)\n n_pos = np.sum(actual)\n n_neg = len(actual) - n_pos\n return (np.sum(r[actual==1]) - n_pos*(n_pos+1)/2) / (n_pos*n_neg)\n\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None, text_file=None):\n \"\"\"\n pretty print for confusion matrixes\n https://gist.github.com/zachguo/10296432\n \"\"\"\n if text_file is None: print(\"\\n\", end=\" \")\n else: print(\"\\n\", end=\" \", file=open(text_file, \"a\"))\n\n columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length\n empty_cell = \" \" * columnwidth\n\n fst_empty_cell = (columnwidth - 3) // 2 * \" \" + \"t/p\" + (columnwidth - 3) // 2 * \" \"\n\n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = \" \" * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell\n # Print header\n if text_file is None: print(\" \" + fst_empty_cell, end=\" \")\n else: print(\" \" + fst_empty_cell, end=\" \", file = open(text_file, \"a\"))\n\n for label in labels:\n if text_file is None: 
print(\"%{0}s\".format(columnwidth) % label, end=\" \")\n else: print(\"%{0}s\".format(columnwidth) % label, end=\" \", file = open(text_file, \"a\"))\n if text_file is None: print()\n else: print(' ', file = open(text_file, \"a\"))\n # Print rows\n for i, label1 in enumerate(labels):\n if text_file is None: print(\" %{0}s\".format(columnwidth) % label1, end=\" \")\n else: print(\" %{0}s\".format(columnwidth) % label1, end=\" \", file = open(text_file, \"a\"))\n for j in range(len(labels)):\n cell = \"%{}d\".format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n if text_file is None: print(cell, end=\" \")\n else: print(cell, end=\" \", file = open(text_file, \"a\"))\n if text_file is None: print()\n else: print(' ', file = open(text_file, \"a\"))\n\ndef evaluate_multi_cls(y_true, y_pred, y_proba, print_conf=True, text_file=None, class_names=None):\n classes, _ = np.unique(y_true, return_counts=True)\n if class_names is None:\n class_names = [str(n) for n in classes]\n\n f1 = f1_score(y_true, y_pred, average='micro')\n mcc = matthews_corrcoef(y_true, y_pred)\n if len(classes)==2:\n mean_auc = roc_auc_score(y_true, y_proba[:,1])\n else:\n mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovo')\n\n # mean_auc = roc_auc_score(y_true, y_proba, average='weighted', multi_class='ovr')\n # ovo should be better, but average is not clear from docs\n # mean_auc = roc_auc_score(y_true, y_proba, average='macro', multi_class='ovo')\n\n if print_conf:\n if text_file is not None:\n print(\"\\nMCC={:.2f} -- F1={:.2f} -- AUC={:.2f}\".format(100*mcc, 100*f1, 100*mean_auc), end=\" \", file=open(text_file, \"a\"))\n cm = confusion_matrix(y_true, y_pred, labels=classes)\n print_cm(cm, class_names, text_file=text_file)\n\n return mean_auc, mcc, f1",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class TestCase(TestCase):
<|reserved_special_token_0|>
def setUp(self):
Document.objects.create(name='First').save()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCase(TestCase):
<|reserved_special_token_0|>
def setUp(self):
Document.objects.create(name='First').save()
def test_endpoints(self):
"""
test for endpoints
"""
self.assertEqual(self.client.get(reverse(
'pdf_crawler:document-list')).status_code, 200)
self.assertEqual(self.client.get(reverse(
'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)
self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).
status_code, 200)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCase(TestCase):
client = Client()
def setUp(self):
Document.objects.create(name='First').save()
def test_endpoints(self):
"""
test for endpoints
"""
self.assertEqual(self.client.get(reverse(
'pdf_crawler:document-list')).status_code, 200)
self.assertEqual(self.client.get(reverse(
'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)
self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).
status_code, 200)
<|reserved_special_token_1|>
from django.test import TestCase, Client
from pdf_crawler.models import Document
from rest_framework.reverse import reverse
class TestCase(TestCase):
client = Client()
def setUp(self):
Document.objects.create(name='First').save()
def test_endpoints(self):
"""
test for endpoints
"""
self.assertEqual(self.client.get(reverse(
'pdf_crawler:document-list')).status_code, 200)
self.assertEqual(self.client.get(reverse(
'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)
self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).
status_code, 200)
|
flexible
|
{
"blob_id": "0d28ab54f08301d9788ca9a5e46d522e043e9507",
"index": 4474,
"step-1": "<mask token>\n\n\nclass TestCase(TestCase):\n <mask token>\n\n def setUp(self):\n Document.objects.create(name='First').save()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCase(TestCase):\n <mask token>\n\n def setUp(self):\n Document.objects.create(name='First').save()\n\n def test_endpoints(self):\n \"\"\"\n test for endpoints\n \"\"\"\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-list')).status_code, 200)\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)\n self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).\n status_code, 200)\n",
"step-3": "<mask token>\n\n\nclass TestCase(TestCase):\n client = Client()\n\n def setUp(self):\n Document.objects.create(name='First').save()\n\n def test_endpoints(self):\n \"\"\"\n test for endpoints\n \"\"\"\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-list')).status_code, 200)\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)\n self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).\n status_code, 200)\n",
"step-4": "from django.test import TestCase, Client\nfrom pdf_crawler.models import Document\nfrom rest_framework.reverse import reverse\n\n\nclass TestCase(TestCase):\n client = Client()\n\n def setUp(self):\n Document.objects.create(name='First').save()\n\n def test_endpoints(self):\n \"\"\"\n test for endpoints\n \"\"\"\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-list')).status_code, 200)\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)\n self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).\n status_code, 200)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dl = DataLoader(phase='Train', shuffle=True)
X_data, y_data = dl.shuffle_and_get()
X_data = np.reshape(X_data, [-1, cf.Height, cf.Width])
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = '0'
<|reserved_special_token_1|>
import tensorflow as tf
import numpy as np
import os
import tensorflow as tf
import glob
import numpy as np
import config as cf
from data_loader import DataLoader
from PIL import Image
from matplotlib import pylab as plt
dl = DataLoader(phase='Train', shuffle=True)
X_data, y_data = dl.shuffle_and_get()
X_data = np.reshape(X_data, [-1, cf.Height, cf.Width])
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = '0'
<|reserved_special_token_1|>
# MINISTを読み込んでレイヤーAPIでCNNを構築するファイル
import tensorflow as tf
import numpy as np
import os
import tensorflow as tf
import glob
import numpy as np
import config as cf
from data_loader import DataLoader
from PIL import Image
from matplotlib import pylab as plt
dl = DataLoader(phase='Train', shuffle=True)
X_data , y_data = dl.shuffle_and_get()
# dl_test = DataLoader(phase='Test', shuffle=True)
X_data = np.reshape(X_data,[-1,cf.Height, cf.Width])
# plt.imshow(X_data[0])
# test_imgs, test_gts = dl_test.get_minibatch(shuffle=True)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list="0"
# def load_img():
# import cv2
# img = cv2.imread("test.jpg").astype(np.float32)
# img = cv2.resize(img, (cf.Width, cf.Height,1))
# img = img[:,:,(2,1,0)]
# img = img[np.newaxis, :]
# img = img / 255.
# return img
# with tf.Session(config=config) as sess:
# saver = tf.train.Saver()
# saver.restore(sess, "out.ckpt")
# img = load_img()
# pred = logits.eval(feed_dict={X: img, keep_prob: 1.0})[0]
# pred_label = np.argmax(pred)
# print(pred_label)
# X_data = dataset['train_img']
# y_data = dataset['train_label']
# print('Rows: %d, Columns: %d' % (X_data.shape[0], X_data.shape[1]))
# X_test =dataset['test_img']
# y_test =dataset['test_label']
# print('Rows: %d, Columns: %d' % (X_test.shape[0], X_test.shape[1]))
# X_train, y_train = X_data[:50000,:], y_data[:50000]
# X_valid, y_valid = X_data[50000:,:], y_data[50000:]
# print('Training: ', X_train.shape, y_train.shape)
# print('Validation: ', X_valid.shape, y_valid.shape)
# print('Test Set: ', X_test.shape, y_test.shape)
|
flexible
|
{
"blob_id": "a5559ff22776dee133f5398bae573f515efb8484",
"index": 3820,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndl = DataLoader(phase='Train', shuffle=True)\nX_data, y_data = dl.shuffle_and_get()\nX_data = np.reshape(X_data, [-1, cf.Height, cf.Width])\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.visible_device_list = '0'\n",
"step-3": "import tensorflow as tf\nimport numpy as np\nimport os\nimport tensorflow as tf\nimport glob\nimport numpy as np\nimport config as cf\nfrom data_loader import DataLoader\nfrom PIL import Image\nfrom matplotlib import pylab as plt\ndl = DataLoader(phase='Train', shuffle=True)\nX_data, y_data = dl.shuffle_and_get()\nX_data = np.reshape(X_data, [-1, cf.Height, cf.Width])\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.visible_device_list = '0'\n",
"step-4": "# MINISTを読み込んでレイヤーAPIでCNNを構築するファイル\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport os\r\n\r\nimport tensorflow as tf\r\nimport glob\r\nimport numpy as np\r\n\r\nimport config as cf\r\nfrom data_loader import DataLoader\r\nfrom PIL import Image\r\nfrom matplotlib import pylab as plt\r\n\r\ndl = DataLoader(phase='Train', shuffle=True)\r\nX_data , y_data = dl.shuffle_and_get()\r\n# dl_test = DataLoader(phase='Test', shuffle=True)\r\nX_data = np.reshape(X_data,[-1,cf.Height, cf.Width])\r\n\r\n\r\n# plt.imshow(X_data[0])\r\n# test_imgs, test_gts = dl_test.get_minibatch(shuffle=True)\r\n\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\nconfig.gpu_options.visible_device_list=\"0\"\r\n\r\n\r\n\r\n# def load_img():\r\n# import cv2\r\n# img = cv2.imread(\"test.jpg\").astype(np.float32)\r\n# img = cv2.resize(img, (cf.Width, cf.Height,1))\r\n# img = img[:,:,(2,1,0)]\r\n# img = img[np.newaxis, :]\r\n# img = img / 255.\r\n# return img\r\n\r\n# with tf.Session(config=config) as sess:\r\n# saver = tf.train.Saver()\r\n# saver.restore(sess, \"out.ckpt\")\r\n\r\n# img = load_img()\r\n\r\n# pred = logits.eval(feed_dict={X: img, keep_prob: 1.0})[0]\r\n# pred_label = np.argmax(pred)\r\n# print(pred_label)\r\n\r\n# X_data = dataset['train_img']\r\n# y_data = dataset['train_label']\r\n# print('Rows: %d, Columns: %d' % (X_data.shape[0], X_data.shape[1]))\r\n# X_test =dataset['test_img']\r\n# y_test =dataset['test_label']\r\n# print('Rows: %d, Columns: %d' % (X_test.shape[0], X_test.shape[1]))\r\n\r\n# X_train, y_train = X_data[:50000,:], y_data[:50000]\r\n# X_valid, y_valid = X_data[50000:,:], y_data[50000:]\r\n\r\n# print('Training: ', X_train.shape, y_train.shape)\r\n# print('Validation: ', X_valid.shape, y_valid.shape)\r\n# print('Test Set: ', X_test.shape, y_test.shape)\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.shortcuts import render
from .models import Recipe, Author
def index(request):
recipes_list = Recipe.objects.all()
return render(request, "index.html",
{"data": recipes_list, "title": "Recipe Box"})
def recipeDetail(request, recipe_id):
recipe_detail = Recipe.objects.filter(id=recipe_id).first()
return render(request, "recipe_detail.html",
{"recipe": recipe_detail})
def authorDetail(request, author_id):
author = Author.objects.filter(id=author_id).first()
recipes = Recipe.objects.filter(author=author_id)
return render(request, "author_detail.html",
{"recipes": recipes, "author": author})
|
normal
|
{
"blob_id": "f0f8ad7b65707bcf691847ccb387e4d026b405b5",
"index": 6395,
"step-1": "<mask token>\n\n\ndef authorDetail(request, author_id):\n author = Author.objects.filter(id=author_id).first()\n recipes = Recipe.objects.filter(author=author_id)\n return render(request, 'author_detail.html', {'recipes': recipes,\n 'author': author})\n",
"step-2": "<mask token>\n\n\ndef recipeDetail(request, recipe_id):\n recipe_detail = Recipe.objects.filter(id=recipe_id).first()\n return render(request, 'recipe_detail.html', {'recipe': recipe_detail})\n\n\ndef authorDetail(request, author_id):\n author = Author.objects.filter(id=author_id).first()\n recipes = Recipe.objects.filter(author=author_id)\n return render(request, 'author_detail.html', {'recipes': recipes,\n 'author': author})\n",
"step-3": "<mask token>\n\n\ndef index(request):\n recipes_list = Recipe.objects.all()\n return render(request, 'index.html', {'data': recipes_list, 'title':\n 'Recipe Box'})\n\n\ndef recipeDetail(request, recipe_id):\n recipe_detail = Recipe.objects.filter(id=recipe_id).first()\n return render(request, 'recipe_detail.html', {'recipe': recipe_detail})\n\n\ndef authorDetail(request, author_id):\n author = Author.objects.filter(id=author_id).first()\n recipes = Recipe.objects.filter(author=author_id)\n return render(request, 'author_detail.html', {'recipes': recipes,\n 'author': author})\n",
"step-4": "from django.shortcuts import render\nfrom .models import Recipe, Author\n\n\ndef index(request):\n recipes_list = Recipe.objects.all()\n return render(request, 'index.html', {'data': recipes_list, 'title':\n 'Recipe Box'})\n\n\ndef recipeDetail(request, recipe_id):\n recipe_detail = Recipe.objects.filter(id=recipe_id).first()\n return render(request, 'recipe_detail.html', {'recipe': recipe_detail})\n\n\ndef authorDetail(request, author_id):\n author = Author.objects.filter(id=author_id).first()\n recipes = Recipe.objects.filter(author=author_id)\n return render(request, 'author_detail.html', {'recipes': recipes,\n 'author': author})\n",
"step-5": "from django.shortcuts import render\nfrom .models import Recipe, Author\n\n\ndef index(request):\n recipes_list = Recipe.objects.all()\n return render(request, \"index.html\",\n {\"data\": recipes_list, \"title\": \"Recipe Box\"})\n\n\ndef recipeDetail(request, recipe_id):\n recipe_detail = Recipe.objects.filter(id=recipe_id).first()\n return render(request, \"recipe_detail.html\",\n {\"recipe\": recipe_detail})\n\n\ndef authorDetail(request, author_id):\n author = Author.objects.filter(id=author_id).first()\n recipes = Recipe.objects.filter(author=author_id)\n return render(request, \"author_detail.html\",\n {\"recipes\": recipes, \"author\": author})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
from distutils.core import setup
setup(
name='RBM',
version='0.0.1',
description='Restricted Boltzmann Machines',
long_description='README',
install_requires=['numpy','pandas'],
)
|
normal
|
{
"blob_id": "fab7ee8a7336ba2c044adce4cc8483af78b775ba",
"index": 1827,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='RBM', version='0.0.1', description=\n 'Restricted Boltzmann Machines', long_description='README',\n install_requires=['numpy', 'pandas'])\n",
"step-3": "from distutils.core import setup\nsetup(name='RBM', version='0.0.1', description=\n 'Restricted Boltzmann Machines', long_description='README',\n install_requires=['numpy', 'pandas'])\n",
"step-4": "#!/usr/bin/env python\n\nfrom distutils.core import setup\n\nsetup(\n name='RBM',\n version='0.0.1',\n description='Restricted Boltzmann Machines',\n long_description='README',\n install_requires=['numpy','pandas'],\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Pattern matching problem
Boyer Moore algorithm
First is my attempt, below is the code provided in the book
Idea:
Optimize brute force approach using 2 heuristics:
- Looking-Glass: start searches from last character of the
pattern and work backwards
- Character-Jump: During testing of a pattern P, a mismatch
in T[i] = c with corresponding pattern P[k] is handled:
a) if C is not contained in P, shift P completely past i.
b) if c is contained in P shift P until an occurrence of c
gets aligned with T[i]
"""
def find_boyer_moore(T, P):
""" return lowest index of T at which the substring P begins or -1"""
n, m = len(T), len(P)
if m == 0: return 0
last = {} # Using hash table for fast access
for k in range(m):
last[P[k]] = k
i = m - 1 # i index at T, k index at P
k = m - 1 # j index of last occurrence of T[i] in P
while i < n:
if T[i] == P[k]: # if chars are equal
""" INCORRECT PART """
i -= 1 # normal iteration
k -= 1
if k == 0:
return i # check if Patter is complete
else:
# if j < k (remember k index at P)
# shift i += m - (j+1)
# if j > k
# shift i += m - k
j = last.get(T[i], -1) # -1 if item not there
i += m - (min(k, j+1))
k = m - 1
return -1
def find_boyer_moore2(T, P):
""" return lowest index of T at which the substring P begins or -1"""
n, m = len(T), len(P)
if m == 0: return 0
last = {} # Using hash table for fast access
for k in range(m):
last[P[k]] = k
i = m - 1 # i index at T, k index at P
k = m - 1 # j index of last occurrence of T[i] in P
while i < n:
if T[i] == P[k]: # if chars are equal
if k == 0:
return i # check if Patter is complete
else:
i -= 1 # normal iteration
k -= 1
else:
j = last.get(T[i], -1) # -1 if item not there
i += m - (min(k, j+1))
k = m - 1
return -1
# T = "abacaabadcabacabaabb"
T = "ddcbacab"
P = "abacab"
print(find_boyer_moore2(T, P))
|
normal
|
{
"blob_id": "c418b9b6903ebdad204a3a55f2384a94a3be0d09",
"index": 5561,
"step-1": "<mask token>\n\n\ndef find_boyer_moore2(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n if k == 0:\n return i\n else:\n i -= 1\n k -= 1\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_boyer_moore(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n \"\"\" INCORRECT PART \"\"\"\n i -= 1\n k -= 1\n if k == 0:\n return i\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\ndef find_boyer_moore2(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n if k == 0:\n return i\n else:\n i -= 1\n k -= 1\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_boyer_moore(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n \"\"\" INCORRECT PART \"\"\"\n i -= 1\n k -= 1\n if k == 0:\n return i\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\ndef find_boyer_moore2(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n if k == 0:\n return i\n else:\n i -= 1\n k -= 1\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\n<mask token>\nprint(find_boyer_moore2(T, P))\n",
"step-4": "<mask token>\n\n\ndef find_boyer_moore(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n \"\"\" INCORRECT PART \"\"\"\n i -= 1\n k -= 1\n if k == 0:\n return i\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\ndef find_boyer_moore2(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n if k == 0:\n return i\n else:\n i -= 1\n k -= 1\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\nT = 'ddcbacab'\nP = 'abacab'\nprint(find_boyer_moore2(T, P))\n",
"step-5": "\"\"\"\nPattern matching problem\nBoyer Moore algorithm\n\nFirst is my attempt, below is the code provided in the book\nIdea:\nOptimize brute force approach using 2 heuristics:\n- Looking-Glass: start searches from last character of the\npattern and work backwards\n- Character-Jump: During testing of a pattern P, a mismatch\nin T[i] = c with corresponding pattern P[k] is handled:\na) if C is not contained in P, shift P completely past i.\nb) if c is contained in P shift P until an occurrence of c\ngets aligned with T[i]\n\n\"\"\"\n\n\ndef find_boyer_moore(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0: return 0\n last = {} # Using hash table for fast access\n for k in range(m):\n last[P[k]] = k\n i = m - 1 # i index at T, k index at P\n k = m - 1 # j index of last occurrence of T[i] in P\n while i < n:\n if T[i] == P[k]: # if chars are equal\n \"\"\" INCORRECT PART \"\"\"\n i -= 1 # normal iteration\n k -= 1\n if k == 0:\n return i # check if Patter is complete\n else:\n # if j < k (remember k index at P)\n # shift i += m - (j+1)\n # if j > k\n # shift i += m - k\n j = last.get(T[i], -1) # -1 if item not there\n i += m - (min(k, j+1))\n k = m - 1\n return -1\n\n\ndef find_boyer_moore2(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0: return 0\n last = {} # Using hash table for fast access\n for k in range(m):\n last[P[k]] = k\n i = m - 1 # i index at T, k index at P\n k = m - 1 # j index of last occurrence of T[i] in P\n while i < n:\n if T[i] == P[k]: # if chars are equal\n if k == 0:\n return i # check if Patter is complete\n else:\n i -= 1 # normal iteration\n k -= 1\n else:\n j = last.get(T[i], -1) # -1 if item not there\n i += m - (min(k, j+1))\n k = m - 1\n return -1\n\n# T = \"abacaabadcabacabaabb\"\nT = \"ddcbacab\"\nP = \"abacab\"\nprint(find_boyer_moore2(T, P))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
Given a sentence as `txt`, return `True` if any two adjacent words have this
property: One word ends with a vowel, while the word immediately after begins
with a vowel (a e i o u).
### Examples
vowel_links("a very large appliance") ➞ True
vowel_links("go to edabit") ➞ True
vowel_links("an open fire") ➞ False
vowel_links("a sudden applause") ➞ False
### Notes
You can expect sentences in only lowercase, with no punctuation.
"""
def vowel_links(txt):
import re
lst=txt.split(' ')
for i in range(len(lst)-1):
if re.search("[aeiou]",lst[i][-1])and re.search("[aeiou]",lst[i+1][0]):
return True
return False
|
normal
|
{
"blob_id": "eefd94e7c04896cd6265bbacd624bf7e670be445",
"index": 4347,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef vowel_links(txt):\n import re\n lst = txt.split(' ')\n for i in range(len(lst) - 1):\n if re.search('[aeiou]', lst[i][-1]) and re.search('[aeiou]', lst[i +\n 1][0]):\n return True\n return False\n",
"step-3": "\"\"\"\r\n\n\nGiven a sentence as `txt`, return `True` if any two adjacent words have this\nproperty: One word ends with a vowel, while the word immediately after begins\nwith a vowel (a e i o u).\n\n### Examples\n\n vowel_links(\"a very large appliance\") ➞ True\n \n vowel_links(\"go to edabit\") ➞ True\n \n vowel_links(\"an open fire\") ➞ False\n \n vowel_links(\"a sudden applause\") ➞ False\n\n### Notes\n\nYou can expect sentences in only lowercase, with no punctuation.\n\n\"\"\"\r\n\ndef vowel_links(txt):\n import re\n lst=txt.split(' ')\n for i in range(len(lst)-1):\n if re.search(\"[aeiou]\",lst[i][-1])and re.search(\"[aeiou]\",lst[i+1][0]):\n return True\n return False\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import io
import socket
import ssl
from ..exceptions import ProxySchemeUnsupported
from ..packages import six
SSL_BLOCKSIZE = 16384
class SSLTransport:
"""
The SSLTransport wraps an existing socket and establishes an SSL connection.
Contrary to Python's implementation of SSLSocket, it allows you to chain
multiple TLS connections together. It's particularly useful if you need to
implement TLS within TLS.
The class supports most of the socket API operations.
"""
@staticmethod
def _validate_ssl_context_for_tls_in_tls(ssl_context):
"""
Raises a ProxySchemeUnsupported if the provided ssl_context can't be used
for TLS in TLS.
The only requirement is that the ssl_context provides the 'wrap_bio'
methods.
"""
if not hasattr(ssl_context, "wrap_bio"):
if six.PY2:
raise ProxySchemeUnsupported(
"TLS in TLS requires SSLContext.wrap_bio() which isn't "
"supported on Python 2"
)
else:
raise ProxySchemeUnsupported(
"TLS in TLS requires SSLContext.wrap_bio() which isn't "
"available on non-native SSLContext"
)
def __init__(
self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True
):
"""
Create an SSLTransport around socket using the provided ssl_context.
"""
self.incoming = ssl.MemoryBIO()
self.outgoing = ssl.MemoryBIO()
self.suppress_ragged_eofs = suppress_ragged_eofs
self.socket = socket
self.sslobj = ssl_context.wrap_bio(
self.incoming, self.outgoing, server_hostname=server_hostname
)
# Perform initial handshake.
self._ssl_io_loop(self.sslobj.do_handshake)
def __enter__(self):
return self
def __exit__(self, *_):
self.close()
def fileno(self):
return self.socket.fileno()
def read(self, len=1024, buffer=None):
return self._wrap_ssl_read(len, buffer)
def recv(self, len=1024, flags=0):
if flags != 0:
raise ValueError("non-zero flags not allowed in calls to recv")
return self._wrap_ssl_read(len)
def recv_into(self, buffer, nbytes=None, flags=0):
if flags != 0:
raise ValueError("non-zero flags not allowed in calls to recv_into")
if buffer and (nbytes is None):
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
return self.read(nbytes, buffer)
def sendall(self, data, flags=0):
if flags != 0:
raise ValueError("non-zero flags not allowed in calls to sendall")
count = 0
with memoryview(data) as view, view.cast("B") as byte_view:
amount = len(byte_view)
while count < amount:
v = self.send(byte_view[count:])
count += v
def send(self, data, flags=0):
if flags != 0:
raise ValueError("non-zero flags not allowed in calls to send")
response = self._ssl_io_loop(self.sslobj.write, data)
return response
def makefile(
self, mode="r", buffering=None, encoding=None, errors=None, newline=None
):
"""
Python's httpclient uses makefile and buffered io when reading HTTP
messages and we need to support it.
This is unfortunately a copy and paste of socket.py makefile with small
changes to point to the socket directly.
"""
if not set(mode) <= {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = socket.SocketIO(self, rawmode)
self.socket._io_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
def unwrap(self):
self._ssl_io_loop(self.sslobj.unwrap)
def close(self):
self.socket.close()
def getpeercert(self, binary_form=False):
return self.sslobj.getpeercert(binary_form)
def version(self):
return self.sslobj.version()
def cipher(self):
return self.sslobj.cipher()
def selected_alpn_protocol(self):
return self.sslobj.selected_alpn_protocol()
def selected_npn_protocol(self):
return self.sslobj.selected_npn_protocol()
def shared_ciphers(self):
return self.sslobj.shared_ciphers()
def compression(self):
return self.sslobj.compression()
def settimeout(self, value):
self.socket.settimeout(value)
def gettimeout(self):
return self.socket.gettimeout()
def _decref_socketios(self):
self.socket._decref_socketios()
def _wrap_ssl_read(self, len, buffer=None):
try:
return self._ssl_io_loop(self.sslobj.read, len, buffer)
except ssl.SSLError as e:
if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:
return 0 # eof, return 0.
else:
raise
def _ssl_io_loop(self, func, *args):
"""Performs an I/O loop between incoming/outgoing and the socket."""
should_loop = True
ret = None
while should_loop:
errno = None
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
# WANT_READ, and WANT_WRITE are expected, others are not.
raise e
errno = e.errno
buf = self.outgoing.read()
self.socket.sendall(buf)
if errno is None:
should_loop = False
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = self.socket.recv(SSL_BLOCKSIZE)
if buf:
self.incoming.write(buf)
else:
self.incoming.write_eof()
return ret
|
normal
|
{
"blob_id": "78d59e903fecd211aa975ae4c8dc01b17c8fad44",
"index": 8471,
"step-1": "<mask token>\n\n\nclass SSLTransport:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __exit__(self, *_):\n self.close()\n\n def fileno(self):\n return self.socket.fileno()\n\n def read(self, len=1024, buffer=None):\n return self._wrap_ssl_read(len, buffer)\n\n def recv(self, len=1024, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv')\n return self._wrap_ssl_read(len)\n\n def recv_into(self, buffer, nbytes=None, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv_into'\n )\n if buffer and nbytes is None:\n nbytes = len(buffer)\n elif nbytes is None:\n nbytes = 1024\n return self.read(nbytes, buffer)\n\n def sendall(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to sendall')\n count = 0\n with memoryview(data) as view, view.cast('B') as byte_view:\n amount = len(byte_view)\n while count < amount:\n v = self.send(byte_view[count:])\n count += v\n\n def send(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to send')\n response = self._ssl_io_loop(self.sslobj.write, data)\n return response\n\n def makefile(self, mode='r', buffering=None, encoding=None, errors=None,\n newline=None):\n \"\"\"\n Python's httpclient uses makefile and buffered io when reading HTTP\n messages and we need to support it.\n\n This is unfortunately a copy and paste of socket.py makefile with small\n changes to point to the socket directly.\n \"\"\"\n if not set(mode) <= {'r', 'w', 'b'}:\n raise ValueError('invalid mode %r (only r, w, b allowed)' % (mode,)\n )\n writing = 'w' in mode\n reading = 'r' in mode or not writing\n assert reading or writing\n binary = 'b' in mode\n rawmode = ''\n if reading:\n rawmode += 'r'\n if writing:\n rawmode += 'w'\n raw = socket.SocketIO(self, rawmode)\n self.socket._io_refs += 1\n if buffering is None:\n buffering = -1\n if buffering < 0:\n buffering = 
io.DEFAULT_BUFFER_SIZE\n if buffering == 0:\n if not binary:\n raise ValueError('unbuffered streams must be binary')\n return raw\n if reading and writing:\n buffer = io.BufferedRWPair(raw, raw, buffering)\n elif reading:\n buffer = io.BufferedReader(raw, buffering)\n else:\n assert writing\n buffer = io.BufferedWriter(raw, buffering)\n if binary:\n return buffer\n text = io.TextIOWrapper(buffer, encoding, errors, newline)\n text.mode = mode\n return text\n <mask token>\n\n def close(self):\n self.socket.close()\n <mask token>\n\n def version(self):\n return self.sslobj.version()\n\n def cipher(self):\n return self.sslobj.cipher()\n\n def selected_alpn_protocol(self):\n return self.sslobj.selected_alpn_protocol()\n\n def selected_npn_protocol(self):\n return self.sslobj.selected_npn_protocol()\n\n def shared_ciphers(self):\n return self.sslobj.shared_ciphers()\n\n def compression(self):\n return self.sslobj.compression()\n <mask token>\n <mask token>\n\n def _decref_socketios(self):\n self.socket._decref_socketios()\n\n def _wrap_ssl_read(self, len, buffer=None):\n try:\n return self._ssl_io_loop(self.sslobj.read, len, buffer)\n except ssl.SSLError as e:\n if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:\n return 0\n else:\n raise\n\n def _ssl_io_loop(self, func, *args):\n \"\"\"Performs an I/O loop between incoming/outgoing and the socket.\"\"\"\n should_loop = True\n ret = None\n while should_loop:\n errno = None\n try:\n ret = func(*args)\n except ssl.SSLError as e:\n if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.\n SSL_ERROR_WANT_WRITE):\n raise e\n errno = e.errno\n buf = self.outgoing.read()\n self.socket.sendall(buf)\n if errno is None:\n should_loop = False\n elif errno == ssl.SSL_ERROR_WANT_READ:\n buf = self.socket.recv(SSL_BLOCKSIZE)\n if buf:\n self.incoming.write(buf)\n else:\n self.incoming.write_eof()\n return ret\n",
"step-2": "<mask token>\n\n\nclass SSLTransport:\n <mask token>\n <mask token>\n\n def __init__(self, socket, ssl_context, server_hostname=None,\n suppress_ragged_eofs=True):\n \"\"\"\n Create an SSLTransport around socket using the provided ssl_context.\n \"\"\"\n self.incoming = ssl.MemoryBIO()\n self.outgoing = ssl.MemoryBIO()\n self.suppress_ragged_eofs = suppress_ragged_eofs\n self.socket = socket\n self.sslobj = ssl_context.wrap_bio(self.incoming, self.outgoing,\n server_hostname=server_hostname)\n self._ssl_io_loop(self.sslobj.do_handshake)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self.close()\n\n def fileno(self):\n return self.socket.fileno()\n\n def read(self, len=1024, buffer=None):\n return self._wrap_ssl_read(len, buffer)\n\n def recv(self, len=1024, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv')\n return self._wrap_ssl_read(len)\n\n def recv_into(self, buffer, nbytes=None, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv_into'\n )\n if buffer and nbytes is None:\n nbytes = len(buffer)\n elif nbytes is None:\n nbytes = 1024\n return self.read(nbytes, buffer)\n\n def sendall(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to sendall')\n count = 0\n with memoryview(data) as view, view.cast('B') as byte_view:\n amount = len(byte_view)\n while count < amount:\n v = self.send(byte_view[count:])\n count += v\n\n def send(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to send')\n response = self._ssl_io_loop(self.sslobj.write, data)\n return response\n\n def makefile(self, mode='r', buffering=None, encoding=None, errors=None,\n newline=None):\n \"\"\"\n Python's httpclient uses makefile and buffered io when reading HTTP\n messages and we need to support it.\n\n This is unfortunately a copy and paste of socket.py makefile with small\n changes to point 
to the socket directly.\n \"\"\"\n if not set(mode) <= {'r', 'w', 'b'}:\n raise ValueError('invalid mode %r (only r, w, b allowed)' % (mode,)\n )\n writing = 'w' in mode\n reading = 'r' in mode or not writing\n assert reading or writing\n binary = 'b' in mode\n rawmode = ''\n if reading:\n rawmode += 'r'\n if writing:\n rawmode += 'w'\n raw = socket.SocketIO(self, rawmode)\n self.socket._io_refs += 1\n if buffering is None:\n buffering = -1\n if buffering < 0:\n buffering = io.DEFAULT_BUFFER_SIZE\n if buffering == 0:\n if not binary:\n raise ValueError('unbuffered streams must be binary')\n return raw\n if reading and writing:\n buffer = io.BufferedRWPair(raw, raw, buffering)\n elif reading:\n buffer = io.BufferedReader(raw, buffering)\n else:\n assert writing\n buffer = io.BufferedWriter(raw, buffering)\n if binary:\n return buffer\n text = io.TextIOWrapper(buffer, encoding, errors, newline)\n text.mode = mode\n return text\n <mask token>\n\n def close(self):\n self.socket.close()\n <mask token>\n\n def version(self):\n return self.sslobj.version()\n\n def cipher(self):\n return self.sslobj.cipher()\n\n def selected_alpn_protocol(self):\n return self.sslobj.selected_alpn_protocol()\n\n def selected_npn_protocol(self):\n return self.sslobj.selected_npn_protocol()\n\n def shared_ciphers(self):\n return self.sslobj.shared_ciphers()\n\n def compression(self):\n return self.sslobj.compression()\n\n def settimeout(self, value):\n self.socket.settimeout(value)\n\n def gettimeout(self):\n return self.socket.gettimeout()\n\n def _decref_socketios(self):\n self.socket._decref_socketios()\n\n def _wrap_ssl_read(self, len, buffer=None):\n try:\n return self._ssl_io_loop(self.sslobj.read, len, buffer)\n except ssl.SSLError as e:\n if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:\n return 0\n else:\n raise\n\n def _ssl_io_loop(self, func, *args):\n \"\"\"Performs an I/O loop between incoming/outgoing and the socket.\"\"\"\n should_loop = True\n ret = None\n while 
should_loop:\n errno = None\n try:\n ret = func(*args)\n except ssl.SSLError as e:\n if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.\n SSL_ERROR_WANT_WRITE):\n raise e\n errno = e.errno\n buf = self.outgoing.read()\n self.socket.sendall(buf)\n if errno is None:\n should_loop = False\n elif errno == ssl.SSL_ERROR_WANT_READ:\n buf = self.socket.recv(SSL_BLOCKSIZE)\n if buf:\n self.incoming.write(buf)\n else:\n self.incoming.write_eof()\n return ret\n",
"step-3": "<mask token>\n\n\nclass SSLTransport:\n \"\"\"\n The SSLTransport wraps an existing socket and establishes an SSL connection.\n\n Contrary to Python's implementation of SSLSocket, it allows you to chain\n multiple TLS connections together. It's particularly useful if you need to\n implement TLS within TLS.\n\n The class supports most of the socket API operations.\n \"\"\"\n\n @staticmethod\n def _validate_ssl_context_for_tls_in_tls(ssl_context):\n \"\"\"\n Raises a ProxySchemeUnsupported if the provided ssl_context can't be used\n for TLS in TLS.\n\n The only requirement is that the ssl_context provides the 'wrap_bio'\n methods.\n \"\"\"\n if not hasattr(ssl_context, 'wrap_bio'):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't available on non-native SSLContext\"\n )\n\n def __init__(self, socket, ssl_context, server_hostname=None,\n suppress_ragged_eofs=True):\n \"\"\"\n Create an SSLTransport around socket using the provided ssl_context.\n \"\"\"\n self.incoming = ssl.MemoryBIO()\n self.outgoing = ssl.MemoryBIO()\n self.suppress_ragged_eofs = suppress_ragged_eofs\n self.socket = socket\n self.sslobj = ssl_context.wrap_bio(self.incoming, self.outgoing,\n server_hostname=server_hostname)\n self._ssl_io_loop(self.sslobj.do_handshake)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self.close()\n\n def fileno(self):\n return self.socket.fileno()\n\n def read(self, len=1024, buffer=None):\n return self._wrap_ssl_read(len, buffer)\n\n def recv(self, len=1024, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv')\n return self._wrap_ssl_read(len)\n\n def recv_into(self, buffer, nbytes=None, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv_into'\n )\n if buffer and nbytes is 
None:\n nbytes = len(buffer)\n elif nbytes is None:\n nbytes = 1024\n return self.read(nbytes, buffer)\n\n def sendall(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to sendall')\n count = 0\n with memoryview(data) as view, view.cast('B') as byte_view:\n amount = len(byte_view)\n while count < amount:\n v = self.send(byte_view[count:])\n count += v\n\n def send(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to send')\n response = self._ssl_io_loop(self.sslobj.write, data)\n return response\n\n def makefile(self, mode='r', buffering=None, encoding=None, errors=None,\n newline=None):\n \"\"\"\n Python's httpclient uses makefile and buffered io when reading HTTP\n messages and we need to support it.\n\n This is unfortunately a copy and paste of socket.py makefile with small\n changes to point to the socket directly.\n \"\"\"\n if not set(mode) <= {'r', 'w', 'b'}:\n raise ValueError('invalid mode %r (only r, w, b allowed)' % (mode,)\n )\n writing = 'w' in mode\n reading = 'r' in mode or not writing\n assert reading or writing\n binary = 'b' in mode\n rawmode = ''\n if reading:\n rawmode += 'r'\n if writing:\n rawmode += 'w'\n raw = socket.SocketIO(self, rawmode)\n self.socket._io_refs += 1\n if buffering is None:\n buffering = -1\n if buffering < 0:\n buffering = io.DEFAULT_BUFFER_SIZE\n if buffering == 0:\n if not binary:\n raise ValueError('unbuffered streams must be binary')\n return raw\n if reading and writing:\n buffer = io.BufferedRWPair(raw, raw, buffering)\n elif reading:\n buffer = io.BufferedReader(raw, buffering)\n else:\n assert writing\n buffer = io.BufferedWriter(raw, buffering)\n if binary:\n return buffer\n text = io.TextIOWrapper(buffer, encoding, errors, newline)\n text.mode = mode\n return text\n\n def unwrap(self):\n self._ssl_io_loop(self.sslobj.unwrap)\n\n def close(self):\n self.socket.close()\n\n def getpeercert(self, binary_form=False):\n return 
self.sslobj.getpeercert(binary_form)\n\n def version(self):\n return self.sslobj.version()\n\n def cipher(self):\n return self.sslobj.cipher()\n\n def selected_alpn_protocol(self):\n return self.sslobj.selected_alpn_protocol()\n\n def selected_npn_protocol(self):\n return self.sslobj.selected_npn_protocol()\n\n def shared_ciphers(self):\n return self.sslobj.shared_ciphers()\n\n def compression(self):\n return self.sslobj.compression()\n\n def settimeout(self, value):\n self.socket.settimeout(value)\n\n def gettimeout(self):\n return self.socket.gettimeout()\n\n def _decref_socketios(self):\n self.socket._decref_socketios()\n\n def _wrap_ssl_read(self, len, buffer=None):\n try:\n return self._ssl_io_loop(self.sslobj.read, len, buffer)\n except ssl.SSLError as e:\n if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:\n return 0\n else:\n raise\n\n def _ssl_io_loop(self, func, *args):\n \"\"\"Performs an I/O loop between incoming/outgoing and the socket.\"\"\"\n should_loop = True\n ret = None\n while should_loop:\n errno = None\n try:\n ret = func(*args)\n except ssl.SSLError as e:\n if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.\n SSL_ERROR_WANT_WRITE):\n raise e\n errno = e.errno\n buf = self.outgoing.read()\n self.socket.sendall(buf)\n if errno is None:\n should_loop = False\n elif errno == ssl.SSL_ERROR_WANT_READ:\n buf = self.socket.recv(SSL_BLOCKSIZE)\n if buf:\n self.incoming.write(buf)\n else:\n self.incoming.write_eof()\n return ret\n",
"step-4": "<mask token>\nSSL_BLOCKSIZE = 16384\n\n\nclass SSLTransport:\n \"\"\"\n The SSLTransport wraps an existing socket and establishes an SSL connection.\n\n Contrary to Python's implementation of SSLSocket, it allows you to chain\n multiple TLS connections together. It's particularly useful if you need to\n implement TLS within TLS.\n\n The class supports most of the socket API operations.\n \"\"\"\n\n @staticmethod\n def _validate_ssl_context_for_tls_in_tls(ssl_context):\n \"\"\"\n Raises a ProxySchemeUnsupported if the provided ssl_context can't be used\n for TLS in TLS.\n\n The only requirement is that the ssl_context provides the 'wrap_bio'\n methods.\n \"\"\"\n if not hasattr(ssl_context, 'wrap_bio'):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't available on non-native SSLContext\"\n )\n\n def __init__(self, socket, ssl_context, server_hostname=None,\n suppress_ragged_eofs=True):\n \"\"\"\n Create an SSLTransport around socket using the provided ssl_context.\n \"\"\"\n self.incoming = ssl.MemoryBIO()\n self.outgoing = ssl.MemoryBIO()\n self.suppress_ragged_eofs = suppress_ragged_eofs\n self.socket = socket\n self.sslobj = ssl_context.wrap_bio(self.incoming, self.outgoing,\n server_hostname=server_hostname)\n self._ssl_io_loop(self.sslobj.do_handshake)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self.close()\n\n def fileno(self):\n return self.socket.fileno()\n\n def read(self, len=1024, buffer=None):\n return self._wrap_ssl_read(len, buffer)\n\n def recv(self, len=1024, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv')\n return self._wrap_ssl_read(len)\n\n def recv_into(self, buffer, nbytes=None, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to recv_into'\n )\n if 
buffer and nbytes is None:\n nbytes = len(buffer)\n elif nbytes is None:\n nbytes = 1024\n return self.read(nbytes, buffer)\n\n def sendall(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to sendall')\n count = 0\n with memoryview(data) as view, view.cast('B') as byte_view:\n amount = len(byte_view)\n while count < amount:\n v = self.send(byte_view[count:])\n count += v\n\n def send(self, data, flags=0):\n if flags != 0:\n raise ValueError('non-zero flags not allowed in calls to send')\n response = self._ssl_io_loop(self.sslobj.write, data)\n return response\n\n def makefile(self, mode='r', buffering=None, encoding=None, errors=None,\n newline=None):\n \"\"\"\n Python's httpclient uses makefile and buffered io when reading HTTP\n messages and we need to support it.\n\n This is unfortunately a copy and paste of socket.py makefile with small\n changes to point to the socket directly.\n \"\"\"\n if not set(mode) <= {'r', 'w', 'b'}:\n raise ValueError('invalid mode %r (only r, w, b allowed)' % (mode,)\n )\n writing = 'w' in mode\n reading = 'r' in mode or not writing\n assert reading or writing\n binary = 'b' in mode\n rawmode = ''\n if reading:\n rawmode += 'r'\n if writing:\n rawmode += 'w'\n raw = socket.SocketIO(self, rawmode)\n self.socket._io_refs += 1\n if buffering is None:\n buffering = -1\n if buffering < 0:\n buffering = io.DEFAULT_BUFFER_SIZE\n if buffering == 0:\n if not binary:\n raise ValueError('unbuffered streams must be binary')\n return raw\n if reading and writing:\n buffer = io.BufferedRWPair(raw, raw, buffering)\n elif reading:\n buffer = io.BufferedReader(raw, buffering)\n else:\n assert writing\n buffer = io.BufferedWriter(raw, buffering)\n if binary:\n return buffer\n text = io.TextIOWrapper(buffer, encoding, errors, newline)\n text.mode = mode\n return text\n\n def unwrap(self):\n self._ssl_io_loop(self.sslobj.unwrap)\n\n def close(self):\n self.socket.close()\n\n def getpeercert(self, 
binary_form=False):\n return self.sslobj.getpeercert(binary_form)\n\n def version(self):\n return self.sslobj.version()\n\n def cipher(self):\n return self.sslobj.cipher()\n\n def selected_alpn_protocol(self):\n return self.sslobj.selected_alpn_protocol()\n\n def selected_npn_protocol(self):\n return self.sslobj.selected_npn_protocol()\n\n def shared_ciphers(self):\n return self.sslobj.shared_ciphers()\n\n def compression(self):\n return self.sslobj.compression()\n\n def settimeout(self, value):\n self.socket.settimeout(value)\n\n def gettimeout(self):\n return self.socket.gettimeout()\n\n def _decref_socketios(self):\n self.socket._decref_socketios()\n\n def _wrap_ssl_read(self, len, buffer=None):\n try:\n return self._ssl_io_loop(self.sslobj.read, len, buffer)\n except ssl.SSLError as e:\n if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:\n return 0\n else:\n raise\n\n def _ssl_io_loop(self, func, *args):\n \"\"\"Performs an I/O loop between incoming/outgoing and the socket.\"\"\"\n should_loop = True\n ret = None\n while should_loop:\n errno = None\n try:\n ret = func(*args)\n except ssl.SSLError as e:\n if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.\n SSL_ERROR_WANT_WRITE):\n raise e\n errno = e.errno\n buf = self.outgoing.read()\n self.socket.sendall(buf)\n if errno is None:\n should_loop = False\n elif errno == ssl.SSL_ERROR_WANT_READ:\n buf = self.socket.recv(SSL_BLOCKSIZE)\n if buf:\n self.incoming.write(buf)\n else:\n self.incoming.write_eof()\n return ret\n",
"step-5": "import io\nimport socket\nimport ssl\n\nfrom ..exceptions import ProxySchemeUnsupported\nfrom ..packages import six\n\nSSL_BLOCKSIZE = 16384\n\n\nclass SSLTransport:\n \"\"\"\n The SSLTransport wraps an existing socket and establishes an SSL connection.\n\n Contrary to Python's implementation of SSLSocket, it allows you to chain\n multiple TLS connections together. It's particularly useful if you need to\n implement TLS within TLS.\n\n The class supports most of the socket API operations.\n \"\"\"\n\n @staticmethod\n def _validate_ssl_context_for_tls_in_tls(ssl_context):\n \"\"\"\n Raises a ProxySchemeUnsupported if the provided ssl_context can't be used\n for TLS in TLS.\n\n The only requirement is that the ssl_context provides the 'wrap_bio'\n methods.\n \"\"\"\n\n if not hasattr(ssl_context, \"wrap_bio\"):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"available on non-native SSLContext\"\n )\n\n def __init__(\n self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True\n ):\n \"\"\"\n Create an SSLTransport around socket using the provided ssl_context.\n \"\"\"\n self.incoming = ssl.MemoryBIO()\n self.outgoing = ssl.MemoryBIO()\n\n self.suppress_ragged_eofs = suppress_ragged_eofs\n self.socket = socket\n\n self.sslobj = ssl_context.wrap_bio(\n self.incoming, self.outgoing, server_hostname=server_hostname\n )\n\n # Perform initial handshake.\n self._ssl_io_loop(self.sslobj.do_handshake)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self.close()\n\n def fileno(self):\n return self.socket.fileno()\n\n def read(self, len=1024, buffer=None):\n return self._wrap_ssl_read(len, buffer)\n\n def recv(self, len=1024, flags=0):\n if flags != 0:\n raise ValueError(\"non-zero flags not allowed in calls to recv\")\n return 
self._wrap_ssl_read(len)\n\n def recv_into(self, buffer, nbytes=None, flags=0):\n if flags != 0:\n raise ValueError(\"non-zero flags not allowed in calls to recv_into\")\n if buffer and (nbytes is None):\n nbytes = len(buffer)\n elif nbytes is None:\n nbytes = 1024\n return self.read(nbytes, buffer)\n\n def sendall(self, data, flags=0):\n if flags != 0:\n raise ValueError(\"non-zero flags not allowed in calls to sendall\")\n count = 0\n with memoryview(data) as view, view.cast(\"B\") as byte_view:\n amount = len(byte_view)\n while count < amount:\n v = self.send(byte_view[count:])\n count += v\n\n def send(self, data, flags=0):\n if flags != 0:\n raise ValueError(\"non-zero flags not allowed in calls to send\")\n response = self._ssl_io_loop(self.sslobj.write, data)\n return response\n\n def makefile(\n self, mode=\"r\", buffering=None, encoding=None, errors=None, newline=None\n ):\n \"\"\"\n Python's httpclient uses makefile and buffered io when reading HTTP\n messages and we need to support it.\n\n This is unfortunately a copy and paste of socket.py makefile with small\n changes to point to the socket directly.\n \"\"\"\n if not set(mode) <= {\"r\", \"w\", \"b\"}:\n raise ValueError(\"invalid mode %r (only r, w, b allowed)\" % (mode,))\n\n writing = \"w\" in mode\n reading = \"r\" in mode or not writing\n assert reading or writing\n binary = \"b\" in mode\n rawmode = \"\"\n if reading:\n rawmode += \"r\"\n if writing:\n rawmode += \"w\"\n raw = socket.SocketIO(self, rawmode)\n self.socket._io_refs += 1\n if buffering is None:\n buffering = -1\n if buffering < 0:\n buffering = io.DEFAULT_BUFFER_SIZE\n if buffering == 0:\n if not binary:\n raise ValueError(\"unbuffered streams must be binary\")\n return raw\n if reading and writing:\n buffer = io.BufferedRWPair(raw, raw, buffering)\n elif reading:\n buffer = io.BufferedReader(raw, buffering)\n else:\n assert writing\n buffer = io.BufferedWriter(raw, buffering)\n if binary:\n return buffer\n text = 
io.TextIOWrapper(buffer, encoding, errors, newline)\n text.mode = mode\n return text\n\n def unwrap(self):\n self._ssl_io_loop(self.sslobj.unwrap)\n\n def close(self):\n self.socket.close()\n\n def getpeercert(self, binary_form=False):\n return self.sslobj.getpeercert(binary_form)\n\n def version(self):\n return self.sslobj.version()\n\n def cipher(self):\n return self.sslobj.cipher()\n\n def selected_alpn_protocol(self):\n return self.sslobj.selected_alpn_protocol()\n\n def selected_npn_protocol(self):\n return self.sslobj.selected_npn_protocol()\n\n def shared_ciphers(self):\n return self.sslobj.shared_ciphers()\n\n def compression(self):\n return self.sslobj.compression()\n\n def settimeout(self, value):\n self.socket.settimeout(value)\n\n def gettimeout(self):\n return self.socket.gettimeout()\n\n def _decref_socketios(self):\n self.socket._decref_socketios()\n\n def _wrap_ssl_read(self, len, buffer=None):\n try:\n return self._ssl_io_loop(self.sslobj.read, len, buffer)\n except ssl.SSLError as e:\n if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:\n return 0 # eof, return 0.\n else:\n raise\n\n def _ssl_io_loop(self, func, *args):\n \"\"\"Performs an I/O loop between incoming/outgoing and the socket.\"\"\"\n should_loop = True\n ret = None\n\n while should_loop:\n errno = None\n try:\n ret = func(*args)\n except ssl.SSLError as e:\n if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):\n # WANT_READ, and WANT_WRITE are expected, others are not.\n raise e\n errno = e.errno\n\n buf = self.outgoing.read()\n self.socket.sendall(buf)\n\n if errno is None:\n should_loop = False\n elif errno == ssl.SSL_ERROR_WANT_READ:\n buf = self.socket.recv(SSL_BLOCKSIZE)\n if buf:\n self.incoming.write(buf)\n else:\n self.incoming.write_eof()\n return ret\n",
"step-ids": [
19,
23,
27,
28,
30
]
}
|
[
19,
23,
27,
28,
30
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('adminsite', '0005_auto_20190324_0706')]
operations = [migrations.RenameField(model_name='district', old_name=
'District', new_name='district')]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('adminsite', '0005_auto_20190324_0706')]
operations = [migrations.RenameField(model_name='district', old_name=
'District', new_name='district')]
<|reserved_special_token_1|>
# Generated by Django 2.1.7 on 2019-03-24 07:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('adminsite', '0005_auto_20190324_0706'),
]
operations = [
migrations.RenameField(
model_name='district',
old_name='District',
new_name='district',
),
]
|
flexible
|
{
"blob_id": "6e56c7792d88385cc28c48a7d6dd32b9d6917c64",
"index": 2913,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('adminsite', '0005_auto_20190324_0706')]\n operations = [migrations.RenameField(model_name='district', old_name=\n 'District', new_name='district')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('adminsite', '0005_auto_20190324_0706')]\n operations = [migrations.RenameField(model_name='district', old_name=\n 'District', new_name='district')]\n",
"step-5": "# Generated by Django 2.1.7 on 2019-03-24 07:08\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('adminsite', '0005_auto_20190324_0706'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='district',\n old_name='District',\n new_name='district',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def GetDatetimeFromMyFormat(l):
l_words = l.split()
l_days = l_words[0].split('-')
l_times = l_words[1].split(':')
out = datetime.datetime(int(l_days[0]), int(l_days[1]), int(l_days[2]),
int(l_times[0]), int(l_times[1]), int(l_times[2]))
return out
<|reserved_special_token_1|>
import datetime, os
def GetDatetimeFromMyFormat(l):
l_words = l.split()
l_days = l_words[0].split('-')
l_times = l_words[1].split(':')
out = datetime.datetime(int(l_days[0]), int(l_days[1]), int(l_days[2]),
int(l_times[0]), int(l_times[1]), int(l_times[2]))
return out
<|reserved_special_token_1|>
import datetime,os
def GetDatetimeFromMyFormat(l):
# l = "2018-5-17 19:18:45"
l_words = l.split()
l_days = l_words[0].split('-')
l_times = l_words[1].split(':')
out = datetime.datetime(int(l_days[0]),int(l_days[1]),int(l_days[2]),int(l_times[0]),int(l_times[1]),int(l_times[2]))
return out
|
flexible
|
{
"blob_id": "6767302869d73d041e2d7061722e05484d19f3e0",
"index": 4752,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef GetDatetimeFromMyFormat(l):\n l_words = l.split()\n l_days = l_words[0].split('-')\n l_times = l_words[1].split(':')\n out = datetime.datetime(int(l_days[0]), int(l_days[1]), int(l_days[2]),\n int(l_times[0]), int(l_times[1]), int(l_times[2]))\n return out\n",
"step-3": "import datetime, os\n\n\ndef GetDatetimeFromMyFormat(l):\n l_words = l.split()\n l_days = l_words[0].split('-')\n l_times = l_words[1].split(':')\n out = datetime.datetime(int(l_days[0]), int(l_days[1]), int(l_days[2]),\n int(l_times[0]), int(l_times[1]), int(l_times[2]))\n return out\n",
"step-4": "import datetime,os\n\ndef GetDatetimeFromMyFormat(l):\n # l = \"2018-5-17 19:18:45\"\n l_words = l.split()\n l_days = l_words[0].split('-')\n l_times = l_words[1].split(':')\n out = datetime.datetime(int(l_days[0]),int(l_days[1]),int(l_days[2]),int(l_times[0]),int(l_times[1]),int(l_times[2]))\n return out\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class SurveyRepository:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class SurveyRepository:
def __init__(self):
self._surveys = {}
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class SurveyRepository:
def __init__(self):
self._surveys = {}
<|reserved_special_token_0|>
def save(self, survey):
self._surveys[survey.id] = survey
<|reserved_special_token_1|>
class SurveyRepository:
def __init__(self):
self._surveys = {}
def get_survey(self, survey_id):
if survey_id in self._surveys:
return self._surveys[survey_id]
def save(self, survey):
self._surveys[survey.id] = survey
|
flexible
|
{
"blob_id": "961643e93582bd92e148d00efebbfe38f99100fc",
"index": 2866,
"step-1": "class SurveyRepository:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class SurveyRepository:\n\n def __init__(self):\n self._surveys = {}\n <mask token>\n <mask token>\n",
"step-3": "class SurveyRepository:\n\n def __init__(self):\n self._surveys = {}\n <mask token>\n\n def save(self, survey):\n self._surveys[survey.id] = survey\n",
"step-4": "class SurveyRepository:\n\n def __init__(self):\n self._surveys = {}\n\n def get_survey(self, survey_id):\n if survey_id in self._surveys:\n return self._surveys[survey_id]\n\n def save(self, survey):\n self._surveys[survey.id] = survey\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from tkinter import *
global P,M,G,en
P=0
M=0
G=0
en=1
def inicio():
global P,M,G,en
B1=Button(ventana,text="CAJAS PEQUEÑAS",command=A,state="normal",bg="yellow").grid(column=1,row=1)
B2=Button(ventana,text="CAJAS MEDIANAS",command=B,state="normal",bg="orange").grid(column=2,row=1)
B3=Button(ventana,text="CAJAS GRANDES",command=C,state="normal",bg="red").grid(column=3,row=1)
B4=Button(ventana,text="TOTAL DE CAJAS",command=D,state="normal",bg="green").grid(column=4,row=1)
def A ():
global P
P=P+1
def B ():
global M
M=M+1
def C ():
global G
G=G+1
def D ():
global P,M,G
l=Label(ventana,text="El total de CAJAS PEQUEÑAS es:"+str(P)).grid(column=0,row=2)
l=Label(ventana,text="El total de CAJAS MEDIANAS es:"+str(M)).grid(column=0,row=3)
l=Label(ventana,text="El total de CAJAS GRANDES es:"+str(G)).grid(column=0,row=4)
l=Label(ventana,text="EL TOTAL DE CAJAS CONTADAS ES:"+str(P+M+G)).grid(column=0,row=5)
if(en==1):
inicio()
ventana=Tk()
inicio()
ventana.mainloop()
|
normal
|
{
"blob_id": "393af07fa7a5c265dbdd3047ef33a77130edf259",
"index": 1915,
"step-1": "<mask token>\n\n\ndef inicio():\n global P, M, G, en\n B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',\n bg='yellow').grid(column=1, row=1)\n B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',\n bg='orange').grid(column=2, row=1)\n B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',\n bg='red').grid(column=3, row=1)\n B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',\n bg='green').grid(column=4, row=1)\n\n\ndef A():\n global P\n P = P + 1\n\n\ndef B():\n global M\n M = M + 1\n\n\ndef C():\n global G\n G = G + 1\n\n\ndef D():\n global P, M, G\n l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(\n column=0, row=2)\n l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(\n column=0, row=3)\n l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(\n column=0, row=4)\n l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)\n ).grid(column=0, row=5)\n if en == 1:\n inicio()\n\n\n<mask token>\n",
"step-2": "<mask token>\nglobal P, M, G, en\n<mask token>\n\n\ndef inicio():\n global P, M, G, en\n B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',\n bg='yellow').grid(column=1, row=1)\n B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',\n bg='orange').grid(column=2, row=1)\n B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',\n bg='red').grid(column=3, row=1)\n B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',\n bg='green').grid(column=4, row=1)\n\n\ndef A():\n global P\n P = P + 1\n\n\ndef B():\n global M\n M = M + 1\n\n\ndef C():\n global G\n G = G + 1\n\n\ndef D():\n global P, M, G\n l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(\n column=0, row=2)\n l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(\n column=0, row=3)\n l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(\n column=0, row=4)\n l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)\n ).grid(column=0, row=5)\n if en == 1:\n inicio()\n\n\n<mask token>\ninicio()\nventana.mainloop()\n",
"step-3": "<mask token>\nglobal P, M, G, en\nP = 0\nM = 0\nG = 0\nen = 1\n\n\ndef inicio():\n global P, M, G, en\n B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',\n bg='yellow').grid(column=1, row=1)\n B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',\n bg='orange').grid(column=2, row=1)\n B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',\n bg='red').grid(column=3, row=1)\n B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',\n bg='green').grid(column=4, row=1)\n\n\ndef A():\n global P\n P = P + 1\n\n\ndef B():\n global M\n M = M + 1\n\n\ndef C():\n global G\n G = G + 1\n\n\ndef D():\n global P, M, G\n l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(\n column=0, row=2)\n l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(\n column=0, row=3)\n l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(\n column=0, row=4)\n l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)\n ).grid(column=0, row=5)\n if en == 1:\n inicio()\n\n\nventana = Tk()\ninicio()\nventana.mainloop()\n",
"step-4": "from tkinter import *\nglobal P, M, G, en\nP = 0\nM = 0\nG = 0\nen = 1\n\n\ndef inicio():\n global P, M, G, en\n B1 = Button(ventana, text='CAJAS PEQUEÑAS', command=A, state='normal',\n bg='yellow').grid(column=1, row=1)\n B2 = Button(ventana, text='CAJAS MEDIANAS', command=B, state='normal',\n bg='orange').grid(column=2, row=1)\n B3 = Button(ventana, text='CAJAS GRANDES', command=C, state='normal',\n bg='red').grid(column=3, row=1)\n B4 = Button(ventana, text='TOTAL DE CAJAS', command=D, state='normal',\n bg='green').grid(column=4, row=1)\n\n\ndef A():\n global P\n P = P + 1\n\n\ndef B():\n global M\n M = M + 1\n\n\ndef C():\n global G\n G = G + 1\n\n\ndef D():\n global P, M, G\n l = Label(ventana, text='El total de CAJAS PEQUEÑAS es:' + str(P)).grid(\n column=0, row=2)\n l = Label(ventana, text='El total de CAJAS MEDIANAS es:' + str(M)).grid(\n column=0, row=3)\n l = Label(ventana, text='El total de CAJAS GRANDES es:' + str(G)).grid(\n column=0, row=4)\n l = Label(ventana, text='EL TOTAL DE CAJAS CONTADAS ES:' + str(P + M + G)\n ).grid(column=0, row=5)\n if en == 1:\n inicio()\n\n\nventana = Tk()\ninicio()\nventana.mainloop()\n",
"step-5": "from tkinter import *\r\nglobal P,M,G,en\r\nP=0\r\nM=0\r\nG=0\r\nen=1\r\ndef inicio():\r\n global P,M,G,en\r\n \r\n B1=Button(ventana,text=\"CAJAS PEQUEÑAS\",command=A,state=\"normal\",bg=\"yellow\").grid(column=1,row=1)\r\n B2=Button(ventana,text=\"CAJAS MEDIANAS\",command=B,state=\"normal\",bg=\"orange\").grid(column=2,row=1)\r\n B3=Button(ventana,text=\"CAJAS GRANDES\",command=C,state=\"normal\",bg=\"red\").grid(column=3,row=1)\r\n B4=Button(ventana,text=\"TOTAL DE CAJAS\",command=D,state=\"normal\",bg=\"green\").grid(column=4,row=1)\r\n \r\n\r\ndef A ():\r\n global P\r\n P=P+1\r\ndef B ():\r\n global M\r\n M=M+1\r\ndef C ():\r\n global G\r\n G=G+1\r\ndef D ():\r\n global P,M,G\r\n l=Label(ventana,text=\"El total de CAJAS PEQUEÑAS es:\"+str(P)).grid(column=0,row=2) \r\n l=Label(ventana,text=\"El total de CAJAS MEDIANAS es:\"+str(M)).grid(column=0,row=3)\r\n l=Label(ventana,text=\"El total de CAJAS GRANDES es:\"+str(G)).grid(column=0,row=4)\r\n l=Label(ventana,text=\"EL TOTAL DE CAJAS CONTADAS ES:\"+str(P+M+G)).grid(column=0,row=5)\r\n\r\n \r\n if(en==1):\r\n inicio()\r\n\r\n \r\nventana=Tk()\r\ninicio()\r\nventana.mainloop()\r\n\r\n\r\n\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import tkinter as tk
import random
import numpy as np
import copy
import time
#################################################################################
#
# Données de partie
NbSimulation = 20000
Data = [ [1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1] ]
GInit = np.array(Data,dtype=np.int8)
GInit = np.flip(GInit,0).transpose()
LARGEUR = 13
HAUTEUR = 17
# container pour passer efficacement toutes les données de la partie
class Game:
def __init__(self, Grille, PlayerX, PlayerY, Score=0):
self.PlayerX = PlayerX
self.PlayerY = PlayerY
self.Score = Score
self.Grille = Grille
def copy(self):
return copy.deepcopy(self)
GameInit = Game(GInit,3,5)
##############################################################
#
# création de la fenetre principale - NE PAS TOUCHER
L = 20 # largeur d'une case du jeu en pixel
largeurPix = LARGEUR * L
hauteurPix = HAUTEUR * L
Window = tk.Tk()
Window.geometry(str(largeurPix)+"x"+str(hauteurPix)) # taille de la fenetre
Window.title("TRON")
# création de la frame principale stockant toutes les pages
F = tk.Frame(Window)
F.pack(side="top", fill="both", expand=True)
F.grid_rowconfigure(0, weight=1)
F.grid_columnconfigure(0, weight=1)
# gestion des différentes pages
ListePages = {}
PageActive = 0
def CreerUnePage(id):
Frame = tk.Frame(F)
ListePages[id] = Frame
Frame.grid(row=0, column=0, sticky="nsew")
return Frame
def AfficherPage(id):
global PageActive
PageActive = id
ListePages[id].tkraise()
Frame0 = CreerUnePage(0)
canvas = tk.Canvas(Frame0,width = largeurPix, height = hauteurPix, bg ="black" )
canvas.place(x=0,y=0)
# Dessine la grille de jeu - ne pas toucher
def Affiche(Game):
canvas.delete("all")
H = canvas.winfo_height()
def DrawCase(x,y,coul):
x *= L
y *= L
canvas.create_rectangle(x,H-y,x+L,H-y-L,fill=coul)
# dessin des murs
for x in range (LARGEUR):
for y in range (HAUTEUR):
if Game.Grille[x,y] == 1 : DrawCase(x,y,"gray" )
if Game.Grille[x,y] == 2 : DrawCase(x,y,"cyan" )
# dessin de la moto
DrawCase(Game.PlayerX,Game.PlayerY,"red" )
def AfficheScore(Game):
    """Overlay the current score in the top-left corner of the canvas."""
    canvas.create_text(80, 13, font='Helvetica 12 bold', fill="yellow",
                       text="SCORE : " + str(Game.Score))
###########################################################
#
# gestion du joueur IA
# VOTRE CODE ICI
# Move deltas, indexed by direction code: 0 = stay put, 1-4 = the four
# orthogonal moves (x and y offsets per code).
dx = np.array([0, -1, 0, 1, 0],dtype=np.int8)
dy = np.array([0, 0, 1, 0, -1],dtype=np.int8)
# score awarded per direction code (staying put scores nothing)
ds = np.array([0, 1, 1, 1, 1],dtype=np.int8)
def GetAllExectuableMove(Game):
    """Return the list of free (x, y) cells adjacent to the player.

    Neighbours are probed in a fixed order (up, down, right, left in grid
    coordinates) so the result order is deterministic.
    """
    moves = []
    for step_x, step_y in ((0, 1), (0, -1), (1, 0), (-1, 0)):
        nx, ny = Game.PlayerX + step_x, Game.PlayerY + step_y
        if Game.Grille[nx, ny] == 0:
            moves.append((nx, ny))
    return moves
def Simulate(Game):
    """Run NbSimulation random roll-outs from *Game*, vectorized with NumPy.

    Each roll-out moves the bike at random until every roll-out is stuck,
    then the mean of the final scores is returned as the value estimate of
    the starting position.
    """
    nb = NbSimulation
    # Copy the starting data nb times so every roll-out has its own state.
    G = np.tile(Game.Grille,(nb,1,1)) # grid (x, y) for each roll-out
    X = np.tile(Game.PlayerX,nb) # player x for each roll-out
    Y = np.tile(Game.PlayerY,nb) # player y for each roll-out
    S = np.tile(Game.Score,nb) # score for each roll-out
    I = np.arange(nb) # 0,1,2,3,...,nb-1
    continuer = True
    while(continuer) :
        # Mark the bike's current cell as part of the light trail.
        G[I, X, Y] = 2

        # For each roll-out, build a length-4 candidate list: slot i holds
        # direction code i+1 when that direction leads to a free cell, 0 otherwise.
        LPossibles = np.zeros((nb, 4),dtype=np.int8)
        for i in range(4):
            LPossibles[I,i] = np.where(G[I, X+dx[i+1], Y+dy[i+1]] == 0,i+1,0)

        # Sort each row in decreasing order so the legal (non-zero)
        # direction codes come first.
        LPossibles.sort(axis=1)
        LPossibles = np.fliplr(LPossibles)

        # Count the legal moves of each roll-out.
        Indices = np.count_nonzero(LPossibles, axis=1)
        # Replace 0 by 1 so the modulo below never divides by zero.
        Indices[Indices == 0] = 1

        # Pick one random legal direction code per roll-out.
        R = np.random.randint(12,size=nb,dtype=np.int8)
        Position = LPossibles[I, R % Indices[I]]

        # Stop once every roll-out is stuck (all picked code 0 = stay put).
        if(nb == np.count_nonzero(Position == 0)): continuer = False

        # Apply the chosen move: score, then position.
        S[I] += ds[Position]
        X += dx[Position]
        Y += dy[Position]

    # Value of the starting position = mean final score over all roll-outs.
    return np.mean(S)
def MonteCarlo(Game):
    """Score a candidate state by averaging many random roll-outs."""
    return Simulate(Game)
def MovePlayerWithIA(Game):
    """Pick the neighbouring cell with the best Monte-Carlo score.

    Returns the chosen (x, y) destination, or (None, None) when the player
    is completely boxed in.
    """
    executableMove = GetAllExectuableMove(Game)
    if len(executableMove) == 0:
        return None, None
    # Remember the real position: candidates are written into Game only so
    # that MonteCarlo can evaluate them, and must not leak out of this call.
    origX, origY = Game.PlayerX, Game.PlayerY
    # Start below any reachable score so a legal move is always returned.
    # (The previous sentinel, 0, discarded moves whose average score was 0
    # and wrongly reported "no move" even though one existed.)
    maxi = -1.0
    result = executableMove[0]
    for x, y in executableMove:
        Game.PlayerX = x
        Game.PlayerY = y
        total = MonteCarlo(Game)
        if total > maxi:
            result = (x, y)
            maxi = total
    # Undo the temporary candidate placement.
    Game.PlayerX, Game.PlayerY = origX, origY
    return result
def Play(Game):
    """Advance the game by one IA move.

    Returns True when the game is over (no legal move left), False otherwise.
    """
    x, y = Game.PlayerX, Game.PlayerY
    Game.Grille[x, y] = 2  # leave the bike's light trail behind

    x, y = MovePlayerWithIA(Game)
    # PEP 8: compare to None with `is`, not `==`.
    if x is None or y is None:
        # collision detected: every neighbouring cell is blocked
        return True  # game over
    Game.PlayerX = x  # commit the move
    Game.PlayerY = y
    Game.Score += 1
    return False  # the game goes on
################################################################################
CurrentGame = GameInit.copy()  # mutable game state driven by the UI loop
def Partie():
    """One animation tick: play a move, redraw, and reschedule itself."""
    Tstart = time.time()
    PartieTermine = Play(CurrentGame)
    print(time.time() - Tstart)
    if PartieTermine:
        AfficheScore(CurrentGame)
        return
    Affiche(CurrentGame)
    # Schedule the next tick in 1000 ms; in the meantime the Tk event loop
    # keeps the interface responsive.
    Window.after(1000, Partie)
#####################################################################################
#
# Interface bootstrap - do not modify
AfficherPage(0)
Window.after(100,Partie)
Window.mainloop()
|
normal
|
{
"blob_id": "86177dfa9b8bed5916703edcc16ea4d01cbabf84",
"index": 3278,
"step-1": "<mask token>\n\n\nclass Game:\n\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n\n def copy(self):\n return copy.deepcopy(self)\n\n\n<mask token>\n\n\ndef Affiche(Game):\n canvas.delete('all')\n H = canvas.winfo_height()\n\n def DrawCase(x, y, coul):\n x *= L\n y *= L\n canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)\n for x in range(LARGEUR):\n for y in range(HAUTEUR):\n if Game.Grille[x, y] == 1:\n DrawCase(x, y, 'gray')\n if Game.Grille[x, y] == 2:\n DrawCase(x, y, 'cyan')\n DrawCase(Game.PlayerX, Game.PlayerY, 'red')\n\n\ndef AfficheScore(Game):\n info = 'SCORE : ' + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',\n text=info)\n\n\n<mask token>\n\n\ndef GetAllExectuableMove(Game):\n possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]\n executableMove = []\n for tup in possibleMove:\n x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x, y]\n if v == 0:\n executableMove.append((x, y))\n return executableMove\n\n\n<mask token>\n\n\ndef MonteCarlo(Game):\n return Simulate(Game)\n\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = None, None\n maxi = 0\n if len(executableMove) == 0:\n return None, None\n for x, y in executableMove:\n Game.PlayerX = x\n Game.PlayerY = y\n total = MonteCarlo(Game)\n if total > maxi:\n result = x, y\n maxi = total\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Game:\n\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n\n def copy(self):\n return copy.deepcopy(self)\n\n\n<mask token>\n\n\ndef CreerUnePage(id):\n Frame = tk.Frame(F)\n ListePages[id] = Frame\n Frame.grid(row=0, column=0, sticky='nsew')\n return Frame\n\n\ndef AfficherPage(id):\n global PageActive\n PageActive = id\n ListePages[id].tkraise()\n\n\n<mask token>\n\n\ndef Affiche(Game):\n canvas.delete('all')\n H = canvas.winfo_height()\n\n def DrawCase(x, y, coul):\n x *= L\n y *= L\n canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)\n for x in range(LARGEUR):\n for y in range(HAUTEUR):\n if Game.Grille[x, y] == 1:\n DrawCase(x, y, 'gray')\n if Game.Grille[x, y] == 2:\n DrawCase(x, y, 'cyan')\n DrawCase(Game.PlayerX, Game.PlayerY, 'red')\n\n\ndef AfficheScore(Game):\n info = 'SCORE : ' + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',\n text=info)\n\n\n<mask token>\n\n\ndef GetAllExectuableMove(Game):\n possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]\n executableMove = []\n for tup in possibleMove:\n x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x, y]\n if v == 0:\n executableMove.append((x, y))\n return executableMove\n\n\ndef Simulate(Game):\n nb = NbSimulation\n G = np.tile(Game.Grille, (nb, 1, 1))\n X = np.tile(Game.PlayerX, nb)\n Y = np.tile(Game.PlayerY, nb)\n S = np.tile(Game.Score, nb)\n I = np.arange(nb)\n continuer = True\n while continuer:\n G[I, X, Y] = 2\n LPossibles = np.zeros((nb, 4), dtype=np.int8)\n for i in range(4):\n LPossibles[I, i] = np.where(G[I, X + dx[i + 1], Y + dy[i + 1]] ==\n 0, i + 1, 0)\n LPossibles.sort(axis=1)\n LPossibles = np.fliplr(LPossibles)\n Indices = np.count_nonzero(LPossibles, axis=1)\n Indices[Indices == 0] = 1\n R = np.random.randint(12, size=nb, dtype=np.int8)\n Position = LPossibles[I, R % Indices[I]]\n 
if nb == np.count_nonzero(Position == 0):\n continuer = False\n S[I] += ds[Position]\n X += dx[Position]\n Y += dy[Position]\n return np.mean(S)\n\n\ndef MonteCarlo(Game):\n return Simulate(Game)\n\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = None, None\n maxi = 0\n if len(executableMove) == 0:\n return None, None\n for x, y in executableMove:\n Game.PlayerX = x\n Game.PlayerY = y\n total = MonteCarlo(Game)\n if total > maxi:\n result = x, y\n maxi = total\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Game:\n\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n\n def copy(self):\n return copy.deepcopy(self)\n\n\n<mask token>\nWindow.geometry(str(largeurPix) + 'x' + str(hauteurPix))\nWindow.title('TRON')\n<mask token>\nF.pack(side='top', fill='both', expand=True)\nF.grid_rowconfigure(0, weight=1)\nF.grid_columnconfigure(0, weight=1)\n<mask token>\n\n\ndef CreerUnePage(id):\n Frame = tk.Frame(F)\n ListePages[id] = Frame\n Frame.grid(row=0, column=0, sticky='nsew')\n return Frame\n\n\ndef AfficherPage(id):\n global PageActive\n PageActive = id\n ListePages[id].tkraise()\n\n\n<mask token>\ncanvas.place(x=0, y=0)\n\n\ndef Affiche(Game):\n canvas.delete('all')\n H = canvas.winfo_height()\n\n def DrawCase(x, y, coul):\n x *= L\n y *= L\n canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)\n for x in range(LARGEUR):\n for y in range(HAUTEUR):\n if Game.Grille[x, y] == 1:\n DrawCase(x, y, 'gray')\n if Game.Grille[x, y] == 2:\n DrawCase(x, y, 'cyan')\n DrawCase(Game.PlayerX, Game.PlayerY, 'red')\n\n\ndef AfficheScore(Game):\n info = 'SCORE : ' + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',\n text=info)\n\n\n<mask token>\n\n\ndef GetAllExectuableMove(Game):\n possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]\n executableMove = []\n for tup in possibleMove:\n x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x, y]\n if v == 0:\n executableMove.append((x, y))\n return executableMove\n\n\ndef Simulate(Game):\n nb = NbSimulation\n G = np.tile(Game.Grille, (nb, 1, 1))\n X = np.tile(Game.PlayerX, nb)\n Y = np.tile(Game.PlayerY, nb)\n S = np.tile(Game.Score, nb)\n I = np.arange(nb)\n continuer = True\n while continuer:\n G[I, X, Y] = 2\n LPossibles = np.zeros((nb, 4), dtype=np.int8)\n for i in range(4):\n LPossibles[I, i] = np.where(G[I, X + dx[i + 1], Y + dy[i + 1]] ==\n 0, 
i + 1, 0)\n LPossibles.sort(axis=1)\n LPossibles = np.fliplr(LPossibles)\n Indices = np.count_nonzero(LPossibles, axis=1)\n Indices[Indices == 0] = 1\n R = np.random.randint(12, size=nb, dtype=np.int8)\n Position = LPossibles[I, R % Indices[I]]\n if nb == np.count_nonzero(Position == 0):\n continuer = False\n S[I] += ds[Position]\n X += dx[Position]\n Y += dy[Position]\n return np.mean(S)\n\n\ndef MonteCarlo(Game):\n return Simulate(Game)\n\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = None, None\n maxi = 0\n if len(executableMove) == 0:\n return None, None\n for x, y in executableMove:\n Game.PlayerX = x\n Game.PlayerY = y\n total = MonteCarlo(Game)\n if total > maxi:\n result = x, y\n maxi = total\n return result\n\n\ndef Play(Game):\n x, y = Game.PlayerX, Game.PlayerY\n Game.Grille[x, y] = 2\n x, y = MovePlayerWithIA(Game)\n if x == None or y == None:\n return True\n else:\n Game.PlayerX = x\n Game.PlayerY = y\n Game.Score += 1\n return False\n\n\n<mask token>\n\n\ndef Partie():\n Tstart = time.time()\n PartieTermine = Play(CurrentGame)\n print(time.time() - Tstart)\n if not PartieTermine:\n Affiche(CurrentGame)\n Window.after(1000, Partie)\n else:\n AfficheScore(CurrentGame)\n\n\nAfficherPage(0)\nWindow.after(100, Partie)\nWindow.mainloop()\n",
"step-4": "<mask token>\nNbSimulation = 20000\nData = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\nGInit = np.array(Data, dtype=np.int8)\nGInit = np.flip(GInit, 0).transpose()\nLARGEUR = 13\nHAUTEUR = 17\n\n\nclass Game:\n\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n\n def copy(self):\n return copy.deepcopy(self)\n\n\nGameInit = Game(GInit, 3, 5)\nL = 20\nlargeurPix = LARGEUR * L\nhauteurPix = HAUTEUR * L\nWindow = tk.Tk()\nWindow.geometry(str(largeurPix) + 'x' + str(hauteurPix))\nWindow.title('TRON')\nF = tk.Frame(Window)\nF.pack(side='top', fill='both', expand=True)\nF.grid_rowconfigure(0, weight=1)\nF.grid_columnconfigure(0, weight=1)\nListePages = {}\nPageActive = 0\n\n\ndef CreerUnePage(id):\n Frame = tk.Frame(F)\n ListePages[id] = Frame\n Frame.grid(row=0, column=0, sticky='nsew')\n return Frame\n\n\ndef AfficherPage(id):\n global PageActive\n PageActive = id\n ListePages[id].tkraise()\n\n\nFrame0 = CreerUnePage(0)\ncanvas = tk.Canvas(Frame0, width=largeurPix, height=hauteurPix, bg='black')\ncanvas.place(x=0, y=0)\n\n\ndef Affiche(Game):\n canvas.delete('all')\n H = canvas.winfo_height()\n\n def DrawCase(x, y, coul):\n x *= L\n y *= L\n canvas.create_rectangle(x, H - y, x + L, H - y - L, 
fill=coul)\n for x in range(LARGEUR):\n for y in range(HAUTEUR):\n if Game.Grille[x, y] == 1:\n DrawCase(x, y, 'gray')\n if Game.Grille[x, y] == 2:\n DrawCase(x, y, 'cyan')\n DrawCase(Game.PlayerX, Game.PlayerY, 'red')\n\n\ndef AfficheScore(Game):\n info = 'SCORE : ' + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',\n text=info)\n\n\ndx = np.array([0, -1, 0, 1, 0], dtype=np.int8)\ndy = np.array([0, 0, 1, 0, -1], dtype=np.int8)\nds = np.array([0, 1, 1, 1, 1], dtype=np.int8)\n\n\ndef GetAllExectuableMove(Game):\n possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]\n executableMove = []\n for tup in possibleMove:\n x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x, y]\n if v == 0:\n executableMove.append((x, y))\n return executableMove\n\n\ndef Simulate(Game):\n nb = NbSimulation\n G = np.tile(Game.Grille, (nb, 1, 1))\n X = np.tile(Game.PlayerX, nb)\n Y = np.tile(Game.PlayerY, nb)\n S = np.tile(Game.Score, nb)\n I = np.arange(nb)\n continuer = True\n while continuer:\n G[I, X, Y] = 2\n LPossibles = np.zeros((nb, 4), dtype=np.int8)\n for i in range(4):\n LPossibles[I, i] = np.where(G[I, X + dx[i + 1], Y + dy[i + 1]] ==\n 0, i + 1, 0)\n LPossibles.sort(axis=1)\n LPossibles = np.fliplr(LPossibles)\n Indices = np.count_nonzero(LPossibles, axis=1)\n Indices[Indices == 0] = 1\n R = np.random.randint(12, size=nb, dtype=np.int8)\n Position = LPossibles[I, R % Indices[I]]\n if nb == np.count_nonzero(Position == 0):\n continuer = False\n S[I] += ds[Position]\n X += dx[Position]\n Y += dy[Position]\n return np.mean(S)\n\n\ndef MonteCarlo(Game):\n return Simulate(Game)\n\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = None, None\n maxi = 0\n if len(executableMove) == 0:\n return None, None\n for x, y in executableMove:\n Game.PlayerX = x\n Game.PlayerY = y\n total = MonteCarlo(Game)\n if total > maxi:\n result = x, y\n maxi = total\n return result\n\n\ndef Play(Game):\n x, y = 
Game.PlayerX, Game.PlayerY\n Game.Grille[x, y] = 2\n x, y = MovePlayerWithIA(Game)\n if x == None or y == None:\n return True\n else:\n Game.PlayerX = x\n Game.PlayerY = y\n Game.Score += 1\n return False\n\n\nCurrentGame = GameInit.copy()\n\n\ndef Partie():\n Tstart = time.time()\n PartieTermine = Play(CurrentGame)\n print(time.time() - Tstart)\n if not PartieTermine:\n Affiche(CurrentGame)\n Window.after(1000, Partie)\n else:\n AfficheScore(CurrentGame)\n\n\nAfficherPage(0)\nWindow.after(100, Partie)\nWindow.mainloop()\n",
"step-5": "import tkinter as tk\nimport random\nimport numpy as np\nimport copy \nimport time\n\n#################################################################################\n#\n# Données de partie\nNbSimulation = 20000\nData = [ [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,1,0,0,0,0,0,0,0,0,0,0,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1] ]\n\nGInit = np.array(Data,dtype=np.int8)\nGInit = np.flip(GInit,0).transpose()\n\nLARGEUR = 13\nHAUTEUR = 17\n\n# container pour passer efficacement toutes les données de la partie\n\nclass Game:\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n \n def copy(self): \n return copy.deepcopy(self)\n\nGameInit = Game(GInit,3,5)\n\n##############################################################\n#\n# création de la fenetre principale - NE PAS TOUCHER\n\nL = 20 # largeur d'une case du jeu en pixel \nlargeurPix = LARGEUR * L\nhauteurPix = HAUTEUR * L\n\n\nWindow = tk.Tk()\nWindow.geometry(str(largeurPix)+\"x\"+str(hauteurPix)) # taille de la fenetre\nWindow.title(\"TRON\")\n\n\n# création de la frame principale stockant toutes les pages\n\nF = tk.Frame(Window)\nF.pack(side=\"top\", fill=\"both\", expand=True)\nF.grid_rowconfigure(0, weight=1)\nF.grid_columnconfigure(0, weight=1)\n\n# gestion des différentes pages\n\nListePages = {}\nPageActive = 0\n\ndef CreerUnePage(id):\n Frame = tk.Frame(F)\n ListePages[id] = Frame\n Frame.grid(row=0, column=0, sticky=\"nsew\")\n return Frame\n\ndef AfficherPage(id):\n global PageActive\n 
PageActive = id\n ListePages[id].tkraise()\n \nFrame0 = CreerUnePage(0)\n\ncanvas = tk.Canvas(Frame0,width = largeurPix, height = hauteurPix, bg =\"black\" )\ncanvas.place(x=0,y=0)\n\n# Dessine la grille de jeu - ne pas toucher\n\n\ndef Affiche(Game):\n canvas.delete(\"all\")\n H = canvas.winfo_height()\n \n def DrawCase(x,y,coul):\n x *= L\n y *= L\n canvas.create_rectangle(x,H-y,x+L,H-y-L,fill=coul)\n \n # dessin des murs \n \n for x in range (LARGEUR):\n for y in range (HAUTEUR):\n if Game.Grille[x,y] == 1 : DrawCase(x,y,\"gray\" )\n if Game.Grille[x,y] == 2 : DrawCase(x,y,\"cyan\" )\n \n \n # dessin de la moto\n DrawCase(Game.PlayerX,Game.PlayerY,\"red\" )\n\ndef AfficheScore(Game):\n info = \"SCORE : \" + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill=\"yellow\", text=info)\n\n\n###########################################################\n#\n# gestion du joueur IA\n\n# VOTRE CODE ICI \ndx = np.array([0, -1, 0, 1, 0],dtype=np.int8)\ndy = np.array([0, 0, 1, 0, -1],dtype=np.int8)\n\n# scores associés à chaque déplacement\nds = np.array([0, 1, 1, 1, 1],dtype=np.int8)\ndef GetAllExectuableMove(Game):\n possibleMove = [(0,+1),(0,-1),(+1,0),(-1,0)]\n executableMove = []\n for tup in possibleMove :\n x,y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x,y]\n if v == 0 :\n executableMove.append((x,y))\n \n return executableMove\n\ndef Simulate(Game):\n\n nb = NbSimulation\n # on copie les datas de départ pour créer plusieurs parties\n G = np.tile(Game.Grille,(nb,1,1)) # grille (x,y) pour chaque partie\n X = np.tile(Game.PlayerX,nb) # playerX (x) pour chaque partie\n Y = np.tile(Game.PlayerY,nb) # playerY (y) pour chaque partie\n S = np.tile(Game.Score,nb) # score (s) pour chaque partie\n I = np.arange(nb) # 0,1,2,3,...,nb-1\n\n # VOTRE CODE ICI\n continuer = True\n\n while(continuer) :\n\n # pour chaque partie, on fait une affectation à 2 le passage de la moto\n G[I, X, Y] = 2\n\n\n ### pour chaque partie, on gère tous les 
index de déplacements possibles\n # pour chaque partie, on associe une liste de taille 4 initialisée à 0 \n LPossibles = np.zeros((nb, 4),dtype=np.int8)\n\n # pour chaque partie, on associe la liste de taille 4 à i si le joueur peut bouger dans cette direction, 0 sinon\n for i in range(4): \n LPossibles[I,i] = np.where(G[I, X+dx[i+1], Y+dy[i+1]] == 0,i+1,0)\n\n # pour chaque partie, on trie la liste des directions de manière décroissante\n LPossibles.sort(axis=1)\n LPossibles = np.fliplr(LPossibles)\n\n\n ### pour chaque partie, on compte le nombre de déplacements possibles\n # pour chaque partie, on compte le nombre d'éléments de LPossibles non nuls\n Indices = np.count_nonzero(LPossibles, axis=1)\n \n # pour chaque partie, on remplace les index de 0 par 1 pour pas planter sur le modulo\n Indices[Indices == 0] = 1\n\n # pour chaque partie, on génère un index de direction aléatoire\n R = np.random.randint(12,size=nb,dtype=np.int8)\n\n # pour chaque partie, on réucupère un vecteur position\n Position = LPossibles[I, R % Indices[I]]\n \n\n ### on gère les déplacement et le code\n\n # on arrete le traitement si, on est statique sur l'ensemble des parties\n if(nb == np.count_nonzero(Position == 0)): continuer = False\n\n # pour chaque partie, on incrémente le score\n S[I] += ds[Position]\n\n # pour chaque partie, on déplace le joueur\n X += dx[Position]\n Y += dy[Position]\n\n # on retourne la moyenne des scores\n return np.mean(S)\n\n\n \ndef MonteCarlo(Game):\n return Simulate(Game)\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = (None, None)\n maxi = 0\n if(len(executableMove)==0):\n return None, None\n\n for x,y in executableMove:\n Game.PlayerX = x \n Game.PlayerY = y\n total = MonteCarlo(Game)\n if(total>maxi):\n result = (x,y)\n maxi = total\n return result\n\ndef Play(Game): \n \n x,y = Game.PlayerX, Game.PlayerY\n\n Game.Grille[x,y] = 2 # laisse la trace de la moto\n\n x,y = MovePlayerWithIA(Game)\n if x == None or y == 
None :\n # collision détectée\n return True # partie terminée\n else :\n Game.PlayerX = x # valide le déplacement\n Game.PlayerY = y # valide le déplacement\n Game.Score += 1\n return False # la partie continue\n \n\n################################################################################\n \nCurrentGame = GameInit.copy()\n \n\ndef Partie():\n Tstart = time.time()\n PartieTermine = Play(CurrentGame)\n print(time.time() - Tstart)\n if not PartieTermine :\n Affiche(CurrentGame)\n # rappelle la fonction Partie() dans 30ms\n # entre temps laisse l'OS réafficher l'interface\n Window.after(1000,Partie) \n else :\n AfficheScore(CurrentGame)\n\n\n#####################################################################################\n#\n# Mise en place de l'interface - ne pas toucher\n\nAfficherPage(0)\nWindow.after(100,Partie)\nWindow.mainloop()\n \n\n \n \n\n \n \n\n",
"step-ids": [
8,
11,
14,
15,
17
]
}
|
[
8,
11,
14,
15,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(reader.categories())
print(reader.fileids())
<|reserved_special_token_0|>
print(fileP)
print(fileN)
for w in reader.words(fileP):
print(w + ' ', end='')
if w is '.':
print()
for w in reader.words(fileN):
print(w + ' ', end='')
if w is '.':
print()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
reader = CategorizedPlaintextCorpusReader('/workspace/NLP_python/tokens',
'.*\\.txt', cat_pattern='(\\w+)/*')
print(reader.categories())
print(reader.fileids())
posFiles = reader.fileids(categories='pos')
negFiles = reader.fileids(categories='neg')
fileP = posFiles[randint(0, len(posFiles) - 1)]
fileN = negFiles[randint(0, len(negFiles) - 1)]
print(fileP)
print(fileN)
for w in reader.words(fileP):
print(w + ' ', end='')
if w is '.':
print()
for w in reader.words(fileN):
print(w + ' ', end='')
if w is '.':
print()
<|reserved_special_token_1|>
from nltk.corpus import CategorizedPlaintextCorpusReader
from random import randint
reader = CategorizedPlaintextCorpusReader('/workspace/NLP_python/tokens',
'.*\\.txt', cat_pattern='(\\w+)/*')
print(reader.categories())
print(reader.fileids())
posFiles = reader.fileids(categories='pos')
negFiles = reader.fileids(categories='neg')
fileP = posFiles[randint(0, len(posFiles) - 1)]
fileN = negFiles[randint(0, len(negFiles) - 1)]
print(fileP)
print(fileN)
for w in reader.words(fileP):
print(w + ' ', end='')
if w is '.':
print()
for w in reader.words(fileN):
print(w + ' ', end='')
if w is '.':
print()
<|reserved_special_token_1|>
# Chapter 1: corpora and WordNet - download, load and access an external corpus
from nltk.corpus import CategorizedPlaintextCorpusReader
from random import randint

# Read the corpus: the category (pos/neg) is taken from the first path component.
reader = CategorizedPlaintextCorpusReader(r'/workspace/NLP_python/tokens', r'.*\.txt', cat_pattern=r'(\w+)/*')
print(reader.categories())
print(reader.fileids())

# Sample documents
# file lists for the 'pos' and 'neg' categories
posFiles = reader.fileids(categories='pos')
negFiles = reader.fileids(categories='neg')

# pick one random file from each category
fileP = posFiles[randint(0, len(posFiles)-1)]
fileN = negFiles[randint(0, len(negFiles)-1)]
print(fileP)
print(fileN)

# Print each selected file word by word, breaking the line after a period.
# Fixed: the original used `w is '.'`, which tests object identity and only
# worked by accident of CPython string interning; `==` is the correct test.
for w in reader.words(fileP):
    print(w + ' ', end='')
    if w == '.':
        print()
for w in reader.words(fileN):
    print(w + ' ', end='')
    if w == '.':
        print()
|
flexible
|
{
"blob_id": "81cec5c1f28e92bf8e4adc2e2c632e072ed1f901",
"index": 5765,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(reader.categories())\nprint(reader.fileids())\n<mask token>\nprint(fileP)\nprint(fileN)\nfor w in reader.words(fileP):\n print(w + ' ', end='')\n if w is '.':\n print()\nfor w in reader.words(fileN):\n print(w + ' ', end='')\n if w is '.':\n print()\n",
"step-3": "<mask token>\nreader = CategorizedPlaintextCorpusReader('/workspace/NLP_python/tokens',\n '.*\\\\.txt', cat_pattern='(\\\\w+)/*')\nprint(reader.categories())\nprint(reader.fileids())\nposFiles = reader.fileids(categories='pos')\nnegFiles = reader.fileids(categories='neg')\nfileP = posFiles[randint(0, len(posFiles) - 1)]\nfileN = negFiles[randint(0, len(negFiles) - 1)]\nprint(fileP)\nprint(fileN)\nfor w in reader.words(fileP):\n print(w + ' ', end='')\n if w is '.':\n print()\nfor w in reader.words(fileN):\n print(w + ' ', end='')\n if w is '.':\n print()\n",
"step-4": "from nltk.corpus import CategorizedPlaintextCorpusReader\nfrom random import randint\nreader = CategorizedPlaintextCorpusReader('/workspace/NLP_python/tokens',\n '.*\\\\.txt', cat_pattern='(\\\\w+)/*')\nprint(reader.categories())\nprint(reader.fileids())\nposFiles = reader.fileids(categories='pos')\nnegFiles = reader.fileids(categories='neg')\nfileP = posFiles[randint(0, len(posFiles) - 1)]\nfileN = negFiles[randint(0, len(negFiles) - 1)]\nprint(fileP)\nprint(fileN)\nfor w in reader.words(fileP):\n print(w + ' ', end='')\n if w is '.':\n print()\nfor w in reader.words(fileN):\n print(w + ' ', end='')\n if w is '.':\n print()\n",
"step-5": "# 1장 말뭉치와 워드넷 - 외부 말뭉치 다운로드, 로드하고 액세스하기\nfrom nltk.corpus import CategorizedPlaintextCorpusReader\nfrom random import randint\n\n# 말뭉치 읽기\nreader = CategorizedPlaintextCorpusReader(r'/workspace/NLP_python/tokens', r'.*\\.txt', cat_pattern=r'(\\w+)/*')\nprint(reader.categories())\nprint(reader.fileids())\n\n# 샘플 문서 출력\n# pos, neg 카테고리의 샘플 목록\nposFiles = reader.fileids(categories='pos')\nnegFiles = reader.fileids(categories='neg')\n\n# pos, neg 카테고리에서 각각 임의의 파일 선택\nfileP = posFiles[randint(0, len(posFiles)-1)]\nfileN = negFiles[randint(0, len(negFiles)-1)]\nprint(fileP)\nprint(fileN)\n\n# 액세스한 임의 파일을 문장으로 출력\nfor w in reader.words(fileP):\n print(w + ' ', end='')\n if w is '.':\n print()\nfor w in reader.words(fileN):\n print(w + ' ', end='')\n if w is '.':\n print()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.files import File as DjangoFile
from django.core.management.base import BaseCommand, NoArgsCommand
from filer.models.filemodels import File
from leonardo.module.media.models import *
from filer.settings import FILER_IS_PUBLIC_DEFAULT
from filer.utils.compatibility import upath
from optparse import make_option
import os
# Media model classes probed, in order, when importing a file.
MEDIA_MODELS = [Image, Document, Vector, Video]
class FileImporter(object):
    """Walk a directory tree and mirror it as filer Folders and Files.

    ``path`` is the directory to import; ``base_folder`` (optional) is a
    slash-separated destination folder path inside filer.  ``verbosity``
    controls how chatty the import is (>= 2 logs every object created).
    """

    def __init__(self, * args, **kwargs):
        self.path = kwargs.get('path')
        self.base_folder = kwargs.get('base_folder')
        self.verbosity = int(kwargs.get('verbosity', 1))
        # running totals reported during/after the import
        self.file_created = 0
        self.image_created = 0
        self.folder_created = 0

    def import_file(self, file_obj, folder):
        """
        Create a media object (or a plain File) from *file_obj* in *folder*.

        Each class in MEDIA_MODELS is probed; the ones whose file-type
        matcher accepts the name create the object.  Anything unmatched
        falls back to a generic File.
        """
        created = False
        for cls in MEDIA_MODELS:
            if cls.matches_file_type(file_obj.name):
                obj, created = cls.objects.get_or_create(
                    original_filename=file_obj.name,
                    file=file_obj,
                    folder=folder,
                    is_public=FILER_IS_PUBLIC_DEFAULT)
                if created:
                    self.image_created += 1
        if not created:
            obj, created = File.objects.get_or_create(
                original_filename=file_obj.name,
                file=file_obj,
                folder=folder,
                is_public=FILER_IS_PUBLIC_DEFAULT)
            if created:
                self.file_created += 1
        if self.verbosity >= 2:
            print("file_created #%s / image_created #%s -- file : %s -- created : %s" % (self.file_created,
                                                                                         self.image_created,
                                                                                         obj, created))
        return obj

    def get_or_create_folder(self, folder_names):
        """
        Gets or creates a Folder based the list of folder names in hierarchical
        order (like breadcrumbs).

        get_or_create_folder(['root', 'subfolder', 'subsub folder'])

        creates the folders with correct parent relations and returns the
        'subsub folder' instance.
        """
        if not len(folder_names):
            return None
        current_parent = None
        for folder_name in folder_names:
            current_parent, created = Folder.objects.get_or_create(
                name=folder_name, parent=current_parent)
            if created:
                self.folder_created += 1
                if self.verbosity >= 2:
                    print("folder_created #%s folder : %s -- created : %s" % (self.folder_created,
                                                                              current_parent, created))
        return current_parent

    def walker(self, path=None, base_folder=None):
        """
        Walk a directory structure and create the Folders and Files as
        they appear.
        """
        path = path or self.path
        base_folder = base_folder or self.base_folder
        # prevent trailing slashes and other inconsistencies on path.
        path = os.path.normpath(upath(path))
        if base_folder:
            base_folder = os.path.normpath(upath(base_folder))
            print("The directory structure will be imported in %s" % (base_folder,))
        if self.verbosity >= 1:
            print("Import the folders and files in %s" % (path,))
        root_folder_name = os.path.basename(path)
        for root, dirs, files in os.walk(path):
            rel_folders = root.partition(path)[2].strip(os.path.sep).split(os.path.sep)
            while '' in rel_folders:
                rel_folders.remove('')
            if base_folder:
                folder_names = base_folder.split('/') + [root_folder_name] + rel_folders
            else:
                folder_names = [root_folder_name] + rel_folders
            folder = self.get_or_create_folder(folder_names)
            for file_obj in files:
                file_path = os.path.join(root, file_obj)
                # Fixed: open in binary mode (the original used the default
                # text mode, which corrupts or raises on binary media files)
                # and close the handle deterministically when done.
                with open(file_path, 'rb') as fh:
                    dj_file = DjangoFile(fh, name=file_obj)
                    self.import_file(file_obj=dj_file, folder=folder)
        if self.verbosity >= 1:
            print(('folder_created #%s / file_created #%s / ' +
                   'image_created #%s') % (
                self.folder_created, self.file_created,
                self.image_created))
class Command(NoArgsCommand):
    """
    Import directory structure into the filer ::

        manage.py --path=/tmp/assets/images
        manage.py --path=/tmp/assets/news --folder=images
    """

    # Legacy optparse-style options (pre-Django-1.8 management command API).
    option_list = BaseCommand.option_list + (
        make_option('--path',
                    action='store',
                    dest='path',
                    default=False,
                    help='Import files located in the path into django-filer'),
        make_option('--folder',
                    action='store',
                    dest='base_folder',
                    default=False,
                    help='Specify the destination folder in which the directory structure should be imported'),
        )

    def handle_noargs(self, **options):
        # Delegate the whole run to FileImporter; options carry path/base_folder.
        file_importer = FileImporter(**options)
        file_importer.walker()
|
normal
|
{
"blob_id": "864e9063ec1ed80cd1da3128a38633cbeb2f8bba",
"index": 3775,
"step-1": "<mask token>\n\n\nclass Command(NoArgsCommand):\n \"\"\"\n Import directory structure into the filer ::\n\n manage.py --path=/tmp/assets/images\n manage.py --path=/tmp/assets/news --folder=images\n \"\"\"\n option_list = BaseCommand.option_list + (make_option('--path', action=\n 'store', dest='path', default=False, help=\n 'Import files located in the path into django-filer'), make_option(\n '--folder', action='store', dest='base_folder', default=False, help\n =\n 'Specify the destination folder in which the directory structure should be imported'\n ))\n\n def handle_noargs(self, **options):\n file_importer = FileImporter(**options)\n file_importer.walker()\n",
"step-2": "<mask token>\n\n\nclass FileImporter(object):\n <mask token>\n\n def import_file(self, file_obj, folder):\n \"\"\"\n Create a File or an Image into the given folder\n \"\"\"\n created = False\n for cls in MEDIA_MODELS:\n if cls.matches_file_type(file_obj.name):\n obj, created = cls.objects.get_or_create(original_filename=\n file_obj.name, file=file_obj, folder=folder, is_public=\n FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.image_created += 1\n if not created:\n obj, created = File.objects.get_or_create(original_filename=\n file_obj.name, file=file_obj, folder=folder, is_public=\n FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.file_created += 1\n if self.verbosity >= 2:\n print(\n 'file_created #%s / image_created #%s -- file : %s -- created : %s'\n % (self.file_created, self.image_created, obj, created))\n return obj\n <mask token>\n <mask token>\n\n\nclass Command(NoArgsCommand):\n \"\"\"\n Import directory structure into the filer ::\n\n manage.py --path=/tmp/assets/images\n manage.py --path=/tmp/assets/news --folder=images\n \"\"\"\n option_list = BaseCommand.option_list + (make_option('--path', action=\n 'store', dest='path', default=False, help=\n 'Import files located in the path into django-filer'), make_option(\n '--folder', action='store', dest='base_folder', default=False, help\n =\n 'Specify the destination folder in which the directory structure should be imported'\n ))\n\n def handle_noargs(self, **options):\n file_importer = FileImporter(**options)\n file_importer.walker()\n",
"step-3": "<mask token>\n\n\nclass FileImporter(object):\n\n def __init__(self, *args, **kwargs):\n self.path = kwargs.get('path')\n self.base_folder = kwargs.get('base_folder')\n self.verbosity = int(kwargs.get('verbosity', 1))\n self.file_created = 0\n self.image_created = 0\n self.folder_created = 0\n\n def import_file(self, file_obj, folder):\n \"\"\"\n Create a File or an Image into the given folder\n \"\"\"\n created = False\n for cls in MEDIA_MODELS:\n if cls.matches_file_type(file_obj.name):\n obj, created = cls.objects.get_or_create(original_filename=\n file_obj.name, file=file_obj, folder=folder, is_public=\n FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.image_created += 1\n if not created:\n obj, created = File.objects.get_or_create(original_filename=\n file_obj.name, file=file_obj, folder=folder, is_public=\n FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.file_created += 1\n if self.verbosity >= 2:\n print(\n 'file_created #%s / image_created #%s -- file : %s -- created : %s'\n % (self.file_created, self.image_created, obj, created))\n return obj\n\n def get_or_create_folder(self, folder_names):\n \"\"\"\n Gets or creates a Folder based the list of folder names in hierarchical\n order (like breadcrumbs).\n\n get_or_create_folder(['root', 'subfolder', 'subsub folder'])\n\n creates the folders with correct parent relations and returns the\n 'subsub folder' instance.\n \"\"\"\n if not len(folder_names):\n return None\n current_parent = None\n for folder_name in folder_names:\n current_parent, created = Folder.objects.get_or_create(name=\n folder_name, parent=current_parent)\n if created:\n self.folder_created += 1\n if self.verbosity >= 2:\n print('folder_created #%s folder : %s -- created : %s' %\n (self.folder_created, current_parent, created))\n return current_parent\n\n def walker(self, path=None, base_folder=None):\n \"\"\"\n This method walk a directory structure and create the\n Folders and Files as they appear.\n \"\"\"\n path = path or self.path\n 
base_folder = base_folder or self.base_folder\n path = os.path.normpath(upath(path))\n if base_folder:\n base_folder = os.path.normpath(upath(base_folder))\n print('The directory structure will be imported in %s' % (\n base_folder,))\n if self.verbosity >= 1:\n print('Import the folders and files in %s' % (path,))\n root_folder_name = os.path.basename(path)\n for root, dirs, files in os.walk(path):\n rel_folders = root.partition(path)[2].strip(os.path.sep).split(os\n .path.sep)\n while '' in rel_folders:\n rel_folders.remove('')\n if base_folder:\n folder_names = base_folder.split('/') + [root_folder_name\n ] + rel_folders\n else:\n folder_names = [root_folder_name] + rel_folders\n folder = self.get_or_create_folder(folder_names)\n for file_obj in files:\n dj_file = DjangoFile(open(os.path.join(root, file_obj)),\n name=file_obj)\n self.import_file(file_obj=dj_file, folder=folder)\n if self.verbosity >= 1:\n print(('folder_created #%s / file_created #%s / ' +\n 'image_created #%s') % (self.folder_created, self.\n file_created, self.image_created))\n\n\nclass Command(NoArgsCommand):\n \"\"\"\n Import directory structure into the filer ::\n\n manage.py --path=/tmp/assets/images\n manage.py --path=/tmp/assets/news --folder=images\n \"\"\"\n option_list = BaseCommand.option_list + (make_option('--path', action=\n 'store', dest='path', default=False, help=\n 'Import files located in the path into django-filer'), make_option(\n '--folder', action='store', dest='base_folder', default=False, help\n =\n 'Specify the destination folder in which the directory structure should be imported'\n ))\n\n def handle_noargs(self, **options):\n file_importer = FileImporter(**options)\n file_importer.walker()\n",
"step-4": "from __future__ import unicode_literals\nfrom django.core.files import File as DjangoFile\nfrom django.core.management.base import BaseCommand, NoArgsCommand\nfrom filer.models.filemodels import File\nfrom leonardo.module.media.models import *\nfrom filer.settings import FILER_IS_PUBLIC_DEFAULT\nfrom filer.utils.compatibility import upath\nfrom optparse import make_option\nimport os\nMEDIA_MODELS = [Image, Document, Vector, Video]\n\n\nclass FileImporter(object):\n\n def __init__(self, *args, **kwargs):\n self.path = kwargs.get('path')\n self.base_folder = kwargs.get('base_folder')\n self.verbosity = int(kwargs.get('verbosity', 1))\n self.file_created = 0\n self.image_created = 0\n self.folder_created = 0\n\n def import_file(self, file_obj, folder):\n \"\"\"\n Create a File or an Image into the given folder\n \"\"\"\n created = False\n for cls in MEDIA_MODELS:\n if cls.matches_file_type(file_obj.name):\n obj, created = cls.objects.get_or_create(original_filename=\n file_obj.name, file=file_obj, folder=folder, is_public=\n FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.image_created += 1\n if not created:\n obj, created = File.objects.get_or_create(original_filename=\n file_obj.name, file=file_obj, folder=folder, is_public=\n FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.file_created += 1\n if self.verbosity >= 2:\n print(\n 'file_created #%s / image_created #%s -- file : %s -- created : %s'\n % (self.file_created, self.image_created, obj, created))\n return obj\n\n def get_or_create_folder(self, folder_names):\n \"\"\"\n Gets or creates a Folder based the list of folder names in hierarchical\n order (like breadcrumbs).\n\n get_or_create_folder(['root', 'subfolder', 'subsub folder'])\n\n creates the folders with correct parent relations and returns the\n 'subsub folder' instance.\n \"\"\"\n if not len(folder_names):\n return None\n current_parent = None\n for folder_name in folder_names:\n current_parent, created = Folder.objects.get_or_create(name=\n 
folder_name, parent=current_parent)\n if created:\n self.folder_created += 1\n if self.verbosity >= 2:\n print('folder_created #%s folder : %s -- created : %s' %\n (self.folder_created, current_parent, created))\n return current_parent\n\n def walker(self, path=None, base_folder=None):\n \"\"\"\n This method walk a directory structure and create the\n Folders and Files as they appear.\n \"\"\"\n path = path or self.path\n base_folder = base_folder or self.base_folder\n path = os.path.normpath(upath(path))\n if base_folder:\n base_folder = os.path.normpath(upath(base_folder))\n print('The directory structure will be imported in %s' % (\n base_folder,))\n if self.verbosity >= 1:\n print('Import the folders and files in %s' % (path,))\n root_folder_name = os.path.basename(path)\n for root, dirs, files in os.walk(path):\n rel_folders = root.partition(path)[2].strip(os.path.sep).split(os\n .path.sep)\n while '' in rel_folders:\n rel_folders.remove('')\n if base_folder:\n folder_names = base_folder.split('/') + [root_folder_name\n ] + rel_folders\n else:\n folder_names = [root_folder_name] + rel_folders\n folder = self.get_or_create_folder(folder_names)\n for file_obj in files:\n dj_file = DjangoFile(open(os.path.join(root, file_obj)),\n name=file_obj)\n self.import_file(file_obj=dj_file, folder=folder)\n if self.verbosity >= 1:\n print(('folder_created #%s / file_created #%s / ' +\n 'image_created #%s') % (self.folder_created, self.\n file_created, self.image_created))\n\n\nclass Command(NoArgsCommand):\n \"\"\"\n Import directory structure into the filer ::\n\n manage.py --path=/tmp/assets/images\n manage.py --path=/tmp/assets/news --folder=images\n \"\"\"\n option_list = BaseCommand.option_list + (make_option('--path', action=\n 'store', dest='path', default=False, help=\n 'Import files located in the path into django-filer'), make_option(\n '--folder', action='store', dest='base_folder', default=False, help\n =\n 'Specify the destination folder in which the directory 
structure should be imported'\n ))\n\n def handle_noargs(self, **options):\n file_importer = FileImporter(**options)\n file_importer.walker()\n",
"step-5": "#-*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.core.files import File as DjangoFile\nfrom django.core.management.base import BaseCommand, NoArgsCommand\nfrom filer.models.filemodels import File\nfrom leonardo.module.media.models import *\nfrom filer.settings import FILER_IS_PUBLIC_DEFAULT\nfrom filer.utils.compatibility import upath\nfrom optparse import make_option\nimport os\n\nMEDIA_MODELS = [Image, Document, Vector, Video]\n\n\nclass FileImporter(object):\n\n def __init__(self, * args, **kwargs):\n self.path = kwargs.get('path')\n self.base_folder = kwargs.get('base_folder')\n self.verbosity = int(kwargs.get('verbosity', 1))\n self.file_created = 0\n self.image_created = 0\n self.folder_created = 0\n\n def import_file(self, file_obj, folder):\n \"\"\"\n Create a File or an Image into the given folder\n \"\"\"\n created = False\n for cls in MEDIA_MODELS:\n if cls.matches_file_type(file_obj.name):\n\n obj, created = cls.objects.get_or_create(\n original_filename=file_obj.name,\n file=file_obj,\n folder=folder,\n is_public=FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.image_created += 1\n if not created:\n obj, created = File.objects.get_or_create(\n original_filename=file_obj.name,\n file=file_obj,\n folder=folder,\n is_public=FILER_IS_PUBLIC_DEFAULT)\n if created:\n self.file_created += 1\n if self.verbosity >= 2:\n print(\"file_created #%s / image_created #%s -- file : %s -- created : %s\" % (self.file_created,\n self.image_created,\n obj, created))\n return obj\n\n def get_or_create_folder(self, folder_names):\n \"\"\"\n Gets or creates a Folder based the list of folder names in hierarchical\n order (like breadcrumbs).\n\n get_or_create_folder(['root', 'subfolder', 'subsub folder'])\n\n creates the folders with correct parent relations and returns the\n 'subsub folder' instance.\n \"\"\"\n if not len(folder_names):\n return None\n current_parent = None\n for folder_name in folder_names:\n current_parent, created = 
Folder.objects.get_or_create(\n name=folder_name, parent=current_parent)\n if created:\n self.folder_created += 1\n if self.verbosity >= 2:\n print(\"folder_created #%s folder : %s -- created : %s\" % (self.folder_created,\n current_parent, created))\n return current_parent\n\n def walker(self, path=None, base_folder=None):\n \"\"\"\n This method walk a directory structure and create the\n Folders and Files as they appear.\n \"\"\"\n path = path or self.path\n base_folder = base_folder or self.base_folder\n # prevent trailing slashes and other inconsistencies on path.\n path = os.path.normpath(upath(path))\n if base_folder:\n base_folder = os.path.normpath(upath(base_folder))\n print(\"The directory structure will be imported in %s\" % (base_folder,))\n if self.verbosity >= 1:\n print(\"Import the folders and files in %s\" % (path,))\n root_folder_name = os.path.basename(path)\n for root, dirs, files in os.walk(path):\n rel_folders = root.partition(path)[2].strip(os.path.sep).split(os.path.sep)\n while '' in rel_folders:\n rel_folders.remove('')\n if base_folder:\n folder_names = base_folder.split('/') + [root_folder_name] + rel_folders\n else:\n folder_names = [root_folder_name] + rel_folders\n folder = self.get_or_create_folder(folder_names)\n for file_obj in files:\n dj_file = DjangoFile(open(os.path.join(root, file_obj)),\n name=file_obj)\n self.import_file(file_obj=dj_file, folder=folder)\n if self.verbosity >= 1:\n print(('folder_created #%s / file_created #%s / ' +\n 'image_created #%s') % (\n self.folder_created, self.file_created,\n self.image_created))\n\n\nclass Command(NoArgsCommand):\n\n \"\"\"\n Import directory structure into the filer ::\n\n manage.py --path=/tmp/assets/images\n manage.py --path=/tmp/assets/news --folder=images\n \"\"\"\n\n option_list = BaseCommand.option_list + (\n make_option('--path',\n action='store',\n dest='path',\n default=False,\n help='Import files located in the path into django-filer'),\n make_option('--folder',\n 
action='store',\n dest='base_folder',\n default=False,\n help='Specify the destination folder in which the directory structure should be imported'),\n )\n\n def handle_noargs(self, **options):\n file_importer = FileImporter(**options)\n file_importer.walker()\n",
"step-ids": [
4,
6,
9,
11,
12
]
}
|
[
4,
6,
9,
11,
12
] |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the Pipeline class."""
# pytype: skip-file
import copy
import platform
import unittest
import mock
import pytest
import apache_beam as beam
from apache_beam import typehints
from apache_beam.coders import BytesCoder
from apache_beam.io import Read
from apache_beam.io.iobase import SourceBase
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.pipeline import Pipeline
from apache_beam.pipeline import PipelineOptions
from apache_beam.pipeline import PipelineVisitor
from apache_beam.pipeline import PTransformOverride
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.pvalue import AsSingleton
from apache_beam.pvalue import TaggedOutput
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import CombineGlobally
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Map
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms import WindowInto
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.environments import ProcessEnvironment
from apache_beam.transforms.resources import ResourceHint
from apache_beam.transforms.userstate import BagStateSpec
from apache_beam.transforms.window import SlidingWindows
from apache_beam.transforms.window import TimestampedValue
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import MIN_TIMESTAMP
class FakeUnboundedSource(SourceBase):
  """Stand-in unbounded source for graph-construction tests.

  It only reports itself as unbounded; actually reading from it at
  runtime is not supported.
  """
  def is_bounded(self):
    # Always behave as a streaming (unbounded) source.
    return False
class DoubleParDo(beam.PTransform):
  """Composite transform that doubles every element."""
  def expand(self, input):
    return input | 'Inner' >> beam.Map(lambda a: 2 * a)

  def to_runner_api_parameter(self, context):
    # Serialize via pickling instead of a registered URN.
    return self.to_runner_api_pickled(context)
class TripleParDo(beam.PTransform):
  """Composite transform that triples every element.

  The inner label deliberately matches DoubleParDo's so that replacing
  one transform with the other cannot introduce a label conflict.
  """
  def expand(self, input):
    return input | 'Inner' >> beam.Map(lambda a: 3 * a)
class ToStringParDo(beam.PTransform):
  """Composite transform that formats each element as a string.

  The copy.copy() wrapper defeats Beam's automatic typehint inference,
  so the output type intentionally stays unknown instead of str.
  """
  def expand(self, input):
    return input | 'Inner' >> beam.Map(lambda elem: copy.copy(str(elem)))
class FlattenAndDouble(beam.PTransform):
  """Flattens a tuple of PCollections, then doubles every element."""
  def expand(self, pcolls):
    flattened = pcolls | beam.Flatten()
    return flattened | 'Double' >> DoubleParDo()
class FlattenAndTriple(beam.PTransform):
  """Flattens a tuple of PCollections, then triples every element."""
  def expand(self, pcolls):
    flattened = pcolls | beam.Flatten()
    return flattened | 'Triple' >> TripleParDo()
class AddWithProductDoFn(beam.DoFn):
  """Emits ``input + a * b`` given two singleton side inputs a and b."""
  def process(self, input, a, b):
    yield input + (a * b)
class AddThenMultiplyDoFn(beam.DoFn):
  """Emits ``(input + a) * b`` given two singleton side inputs a and b."""
  def process(self, input, a, b):
    added = input + a
    yield added * b
class AddThenMultiply(beam.PTransform):
  """Applies AddThenMultiplyDoFn to pvalues[0], using pvalues[1] and
  pvalues[2] as singleton side inputs."""
  def expand(self, pvalues):
    main, add_side, mul_side = pvalues[0], pvalues[1], pvalues[2]
    return main | beam.ParDo(
        AddThenMultiplyDoFn(), AsSingleton(add_side), AsSingleton(mul_side))
class PipelineTest(unittest.TestCase):
  @staticmethod
  def custom_callable(pcoll):
    # Plain-callable counterpart of CustomTransform: adds 1 to each element.
    return pcoll | '+1' >> FlatMap(lambda x: [x + 1])
# Some of these tests designate a runner by name, others supply a runner.
# This variation is just to verify that both means of runner specification
# work and is not related to other aspects of the tests.
  class CustomTransform(PTransform):
    """Composite transform that adds 1 to each element."""
    def expand(self, pcoll):
      return pcoll | '+1' >> FlatMap(lambda x: [x + 1])
  class Visitor(PipelineVisitor):
    """Pipeline visitor that records visited values and composite nodes."""
    def __init__(self, visited):
      self.visited = visited  # PCollections seen via visit_value
      self.enter_composite = []  # composite nodes, in enter order
      self.leave_composite = []  # composite nodes, in leave order
    def visit_value(self, value, _):
      self.visited.append(value)
    def enter_composite_transform(self, transform_node):
      self.enter_composite.append(transform_node)
    def leave_composite_transform(self, transform_node):
      self.leave_composite.append(transform_node)
  def test_create(self):
    """Create accepts both list and iterator initial values."""
    with TestPipeline() as pipeline:
      pcoll = pipeline | 'label1' >> Create([1, 2, 3])
      assert_that(pcoll, equal_to([1, 2, 3]))
      # Test if initial value is an iterator object.
      pcoll2 = pipeline | 'label2' >> Create(iter((4, 5, 6)))
      pcoll3 = pcoll2 | 'do' >> FlatMap(lambda x: [x + 10])
      assert_that(pcoll3, equal_to([14, 15, 16]), label='pcoll3')
  def test_flatmap_builtin(self):
    """FlatMap accepts a builtin callable (set) as well as lambdas."""
    with TestPipeline() as pipeline:
      pcoll = pipeline | 'label1' >> Create([1, 2, 3])
      assert_that(pcoll, equal_to([1, 2, 3]))
      pcoll2 = pcoll | 'do' >> FlatMap(lambda x: [x + 10])
      assert_that(pcoll2, equal_to([11, 12, 13]), label='pcoll2')
      pcoll3 = pcoll2 | 'm1' >> Map(lambda x: [x, 12])
      assert_that(
          pcoll3, equal_to([[11, 12], [12, 12], [13, 12]]), label='pcoll3')
      pcoll4 = pcoll3 | 'do2' >> FlatMap(set)
      assert_that(pcoll4, equal_to([11, 12, 12, 12, 13]), label='pcoll4')
  def test_maptuple_builtin(self):
    """MapTuple unpacks tuple elements; sides may be static or deferred."""
    with TestPipeline() as pipeline:
      pcoll = pipeline | Create([('e1', 'e2')])
      side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))
      side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))
      # A test function with a tuple input, an auxiliary parameter,
      # and some side inputs.
      fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (
          e1, e2, t, s1, s2)
      assert_that(
          pcoll | 'NoSides' >> beam.core.MapTuple(fn),
          equal_to([('e1', 'e2', MIN_TIMESTAMP, None, None)]),
          label='NoSidesCheck')
      assert_that(
          pcoll | 'StaticSides' >> beam.core.MapTuple(fn, 's1', 's2'),
          equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),
          label='StaticSidesCheck')
      assert_that(
          pcoll | 'DynamicSides' >> beam.core.MapTuple(fn, side1, side2),
          equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),
          label='DynamicSidesCheck')
      assert_that(
          pcoll | 'MixedSides' >> beam.core.MapTuple(fn, s2=side2),
          equal_to([('e1', 'e2', MIN_TIMESTAMP, None, 's2')]),
          label='MixedSidesCheck')
  def test_flatmaptuple_builtin(self):
    """FlatMapTuple unpacks tuple elements and flattens the fn's output."""
    with TestPipeline() as pipeline:
      pcoll = pipeline | Create([('e1', 'e2')])
      side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))
      side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))
      # A test function with a tuple input, an auxiliary parameter,
      # and some side inputs.
      fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (
          e1, e2, t, s1, s2)
      assert_that(
          pcoll | 'NoSides' >> beam.core.FlatMapTuple(fn),
          equal_to(['e1', 'e2', MIN_TIMESTAMP, None, None]),
          label='NoSidesCheck')
      assert_that(
          pcoll | 'StaticSides' >> beam.core.FlatMapTuple(fn, 's1', 's2'),
          equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),
          label='StaticSidesCheck')
      assert_that(
          pcoll
          | 'DynamicSides' >> beam.core.FlatMapTuple(fn, side1, side2),
          equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),
          label='DynamicSidesCheck')
      assert_that(
          pcoll | 'MixedSides' >> beam.core.FlatMapTuple(fn, s2=side2),
          equal_to(['e1', 'e2', MIN_TIMESTAMP, None, 's2']),
          label='MixedSidesCheck')
  def test_create_singleton_pcollection(self):
    """A single list element creates a one-element PCollection."""
    with TestPipeline() as pipeline:
      pcoll = pipeline | 'label' >> Create([[1, 2, 3]])
      assert_that(pcoll, equal_to([[1, 2, 3]]))
  def test_visit_entire_graph(self):
    """visit() reaches every PCollection and pairs composite enter/leave."""
    pipeline = Pipeline()
    pcoll1 = pipeline | 'pcoll' >> beam.Impulse()
    pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
    pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
    pcoll4 = pcoll2 | 'do3' >> FlatMap(lambda x: [x + 1])
    transform = PipelineTest.CustomTransform()
    pcoll5 = pcoll4 | transform
    visitor = PipelineTest.Visitor(visited=[])
    pipeline.visit(visitor)
    self.assertEqual({pcoll1, pcoll2, pcoll3, pcoll4, pcoll5},
                     set(visitor.visited))
    self.assertEqual(set(visitor.enter_composite), set(visitor.leave_composite))
    # Two composites are entered: the root and the custom transform.
    self.assertEqual(2, len(visitor.enter_composite))
    self.assertEqual(visitor.enter_composite[1].transform, transform)
    self.assertEqual(visitor.leave_composite[0].transform, transform)
  def test_apply_custom_transform(self):
    """A user-defined composite PTransform can be applied with `|`."""
    with TestPipeline() as pipeline:
      pcoll = pipeline | 'pcoll' >> Create([1, 2, 3])
      result = pcoll | PipelineTest.CustomTransform()
      assert_that(result, equal_to([2, 3, 4]))
  def test_reuse_custom_transform_instance(self):
    """Re-applying the same instance without a new label raises."""
    pipeline = Pipeline()
    pcoll1 = pipeline | 'pcoll1' >> Create([1, 2, 3])
    pcoll2 = pipeline | 'pcoll2' >> Create([4, 5, 6])
    transform = PipelineTest.CustomTransform()
    pcoll1 | transform
    with self.assertRaises(RuntimeError) as cm:
      pipeline.apply(transform, pcoll2)
    self.assertEqual(
        cm.exception.args[0],
        'A transform with label "CustomTransform" already exists in the '
        'pipeline. To apply a transform with a specified label write '
        'pvalue | "label" >> transform')
  def test_reuse_cloned_custom_transform_instance(self):
    """Re-applying an instance is fine when a fresh label is supplied."""
    with TestPipeline() as pipeline:
      pcoll1 = pipeline | 'pc1' >> Create([1, 2, 3])
      pcoll2 = pipeline | 'pc2' >> Create([4, 5, 6])
      transform = PipelineTest.CustomTransform()
      result1 = pcoll1 | transform
      result2 = pcoll2 | 'new_label' >> transform
      assert_that(result1, equal_to([2, 3, 4]), label='r1')
      assert_that(result2, equal_to([5, 6, 7]), label='r2')
  def test_transform_no_super_init(self):
    """A PTransform subclass that skips super().__init__ still works."""
    class AddSuffix(PTransform):
      def __init__(self, suffix):
        # No call to super(...).__init__
        self.suffix = suffix
      def expand(self, pcoll):
        return pcoll | Map(lambda x: x + self.suffix)
    self.assertEqual(['a-x', 'b-x', 'c-x'],
                     sorted(['a', 'b', 'c'] | 'AddSuffix' >> AddSuffix('-x')))
  @unittest.skip("Fails on some platforms with new urllib3.")
  def test_memory_usage(self):
    """Peak memory must not grow proportionally to the number of Maps."""
    try:
      import resource
    except ImportError:
      # Skip the test if resource module is not available (e.g. non-Unix os).
      self.skipTest('resource module not available.')
    if platform.mac_ver()[0]:
      # Skip the test on macos, depending on version it returns ru_maxrss in
      # different units.
      self.skipTest('ru_maxrss is not in standard units.')
    def get_memory_usage_in_bytes():
      # ru_maxrss is in KiB on Linux; convert to bytes.
      return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * (2**10)
    def check_memory(value, memory_threshold):
      memory_usage = get_memory_usage_in_bytes()
      if memory_usage > memory_threshold:
        raise RuntimeError(
            'High memory usage: %d > %d' % (memory_usage, memory_threshold))
      return value
    len_elements = 1000000
    num_elements = 10
    num_maps = 100
    # TODO(robertwb): reduce memory usage of FnApiRunner so that this test
    # passes.
    with TestPipeline(runner='BundleBasedDirectRunner') as pipeline:
      # Consumed memory should not be proportional to the number of maps.
      memory_threshold = (
          get_memory_usage_in_bytes() + (5 * len_elements * num_elements))
      # Plus small additional slack for memory fluctuations during the test.
      memory_threshold += 10 * (2**20)
      biglist = pipeline | 'oom:create' >> Create(
          ['x' * len_elements] * num_elements)
      for i in range(num_maps):
        biglist = biglist | ('oom:addone-%d' % i) >> Map(lambda x: x + 'y')
      result = biglist | 'oom:check' >> Map(check_memory, memory_threshold)
      assert_that(
          result,
          equal_to(['x' * len_elements + 'y' * num_maps] * num_elements))
  def test_aggregator_empty_input(self):
    """CombineGlobally(...).without_defaults() yields nothing on no input."""
    actual = [] | CombineGlobally(max).without_defaults()
    self.assertEqual(actual, [])
  def test_pipeline_as_context(self):
    """Exceptions raised while running `with Pipeline()` propagate."""
    def raise_exception(exn):
      raise exn
    with self.assertRaises(ValueError):
      with Pipeline() as p:
        # pylint: disable=expression-not-assigned
        p | Create([ValueError('msg')]) | Map(raise_exception)
  def test_ptransform_overrides(self):
    """replace_all swaps DoubleParDo for TripleParDo before running."""
    class MyParDoOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)
      def get_replacement_transform_for_applied_ptransform(
          self, applied_ptransform):
        ptransform = applied_ptransform.transform
        if isinstance(ptransform, DoubleParDo):
          return TripleParDo()
        raise ValueError('Unsupported type of transform: %r' % ptransform)
    p = Pipeline()
    pcoll = p | beam.Create([1, 2, 3]) | 'Multiply' >> DoubleParDo()
    # The assertion expects the *replaced* (tripling) behavior.
    assert_that(pcoll, equal_to([3, 6, 9]))
    p.replace_all([MyParDoOverride()])
    p.run()
  def test_ptransform_override_type_hints(self):
    """A replacement without hints keeps the replaced transform's types."""
    class NoTypeHintOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)
      def get_replacement_transform_for_applied_ptransform(
          self, applied_ptransform):
        return ToStringParDo()
    class WithTypeHintOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)
      def get_replacement_transform_for_applied_ptransform(
          self, applied_ptransform):
        return ToStringParDo().with_input_types(int).with_output_types(str)
    for override, expected_type in [(NoTypeHintOverride(), int),
                                    (WithTypeHintOverride(), str)]:
      p = TestPipeline()
      pcoll = (
          p
          | beam.Create([1, 2, 3])
          | 'Operate' >> DoubleParDo()
          | 'NoOp' >> beam.Map(lambda x: x))
      p.replace_all([override])
      self.assertEqual(pcoll.producer.inputs[0].element_type, expected_type)
  def test_ptransform_override_multiple_inputs(self):
    """Overrides apply to transforms consuming a tuple of PCollections."""
    class MyParDoOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, FlattenAndDouble)
      def get_replacement_transform(self, applied_ptransform):
        return FlattenAndTriple()
    p = Pipeline()
    pcoll1 = p | 'pc1' >> beam.Create([1, 2, 3])
    pcoll2 = p | 'pc2' >> beam.Create([4, 5, 6])
    pcoll3 = (pcoll1, pcoll2) | 'FlattenAndMultiply' >> FlattenAndDouble()
    # Expects the tripled (replaced) output of the flattened inputs.
    assert_that(pcoll3, equal_to([3, 6, 9, 12, 15, 18]))
    p.replace_all([MyParDoOverride()])
    p.run()
  def test_ptransform_override_side_inputs(self):
    """Side inputs of a replaced ParDo carry over to the replacement."""
    class MyParDoOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return (
            isinstance(applied_ptransform.transform, ParDo) and
            isinstance(applied_ptransform.transform.fn, AddWithProductDoFn))
      def get_replacement_transform(self, transform):
        return AddThenMultiply()
    p = Pipeline()
    pcoll1 = p | 'pc1' >> beam.Create([2])
    pcoll2 = p | 'pc2' >> beam.Create([3])
    pcoll3 = p | 'pc3' >> beam.Create([4, 5, 6])
    result = pcoll3 | 'Operate' >> beam.ParDo(
        AddWithProductDoFn(), AsSingleton(pcoll1), AsSingleton(pcoll2))
    # (4+2)*3, (5+2)*3, (6+2)*3 -- the replaced (add-then-multiply) result.
    assert_that(result, equal_to([18, 21, 24]))
    p.replace_all([MyParDoOverride()])
    p.run()
  def test_ptransform_override_replacement_inputs(self):
    """get_replacement_inputs can reorder the replaced node's inputs."""
    class MyParDoOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return (
            isinstance(applied_ptransform.transform, ParDo) and
            isinstance(applied_ptransform.transform.fn, AddWithProductDoFn))
      def get_replacement_transform(self, transform):
        return AddThenMultiply()
      def get_replacement_inputs(self, applied_ptransform):
        assert len(applied_ptransform.inputs) == 1
        assert len(applied_ptransform.side_inputs) == 2
        # Swap the order of the two side inputs
        return (
            applied_ptransform.inputs[0],
            applied_ptransform.side_inputs[1].pvalue,
            applied_ptransform.side_inputs[0].pvalue)
    p = Pipeline()
    pcoll1 = p | 'pc1' >> beam.Create([2])
    pcoll2 = p | 'pc2' >> beam.Create([3])
    pcoll3 = p | 'pc3' >> beam.Create([4, 5, 6])
    result = pcoll3 | 'Operate' >> beam.ParDo(
        AddWithProductDoFn(), AsSingleton(pcoll1), AsSingleton(pcoll2))
    # With swapped sides: (4+3)*2, (5+3)*2, (6+3)*2.
    assert_that(result, equal_to([14, 16, 18]))
    p.replace_all([MyParDoOverride()])
    p.run()
  def test_ptransform_override_multiple_outputs(self):
    """Replacing a multi-output ParDo rewires every tagged output."""
    class MultiOutputComposite(PTransform):
      def __init__(self):
        self.output_tags = set()
      def expand(self, pcoll):
        def mux_input(x):
          x = x * 2
          if isinstance(x, int):
            yield TaggedOutput('numbers', x)
          else:
            yield TaggedOutput('letters', x)
        multi = pcoll | 'MyReplacement' >> beam.ParDo(mux_input).with_outputs()
        letters = multi.letters | 'LettersComposite' >> beam.Map(
            lambda x: x * 3)
        numbers = multi.numbers | 'NumbersComposite' >> beam.Map(
            lambda x: x * 5)
        return {
            'letters': letters,
            'numbers': numbers,
        }
    class MultiOutputOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return applied_ptransform.full_label == 'MyMultiOutput'
      def get_replacement_transform_for_applied_ptransform(
          self, applied_ptransform):
        return MultiOutputComposite()
    def mux_input(x):
      if isinstance(x, int):
        yield TaggedOutput('numbers', x)
      else:
        yield TaggedOutput('letters', x)
    with TestPipeline() as p:
      multi = (
          p
          | beam.Create([1, 2, 3, 'a', 'b', 'c'])
          | 'MyMultiOutput' >> beam.ParDo(mux_input).with_outputs())
      letters = multi.letters | 'MyLetters' >> beam.Map(lambda x: x)
      numbers = multi.numbers | 'MyNumbers' >> beam.Map(lambda x: x)
      # Assert that the PCollection replacement worked correctly and that
      # elements are flowing through. The replacement transform first
      # multiplies by 2 then the leaf nodes inside the composite multiply by
      # an additional 3 and 5. Use prime numbers to ensure that each
      # transform is getting executed once.
      assert_that(
          letters,
          equal_to(['a' * 2 * 3, 'b' * 2 * 3, 'c' * 2 * 3]),
          label='assert letters')
      assert_that(
          numbers,
          equal_to([1 * 2 * 5, 2 * 2 * 5, 3 * 2 * 5]),
          label='assert numbers')
      # Do the replacement and run the element assertions.
      p.replace_all([MultiOutputOverride()])
    # The following checks the graph to make sure the replacement occurred.
    visitor = PipelineTest.Visitor(visited=[])
    p.visit(visitor)
    pcollections = visitor.visited
    composites = visitor.enter_composite
    # Assert the replacement is in the composite list and retrieve the
    # AppliedPTransform.
    self.assertIn(
        MultiOutputComposite, [t.transform.__class__ for t in composites])
    multi_output_composite = list(
        filter(
            lambda t: t.transform.__class__ == MultiOutputComposite,
            composites))[0]
    # Assert that all of the replacement PCollections are in the graph.
    for output in multi_output_composite.outputs.values():
      self.assertIn(output, pcollections)
    # Assert that all of the "old"/replaced PCollections are not in the graph.
    self.assertNotIn(multi[None], visitor.visited)
    self.assertNotIn(multi.letters, visitor.visited)
    self.assertNotIn(multi.numbers, visitor.visited)
def test_kv_ptransform_honor_type_hints(self):
  """Explicit with_output_types() hints override failed type inference."""
  # The return type of this DoFn cannot be inferred by the default
  # Beam type inference
  class StatefulDoFn(DoFn):
    # Bag state spec; its presence makes this a stateful DoFn.
    BYTES_STATE = BagStateSpec('bytes', BytesCoder())

    def return_recursive(self, count):
      if count == 0:
        return ["some string"]
      else:
        # NOTE(review): the recursive result is not returned here, so this
        # branch evaluates to None — presumably intentional, to keep the
        # return type opaque to Beam's type inference; confirm.
        self.return_recursive(count - 1)

    def process(self, element, counter=DoFn.StateParam(BYTES_STATE)):
      return self.return_recursive(1)

  # Without an explicit hint the output element type falls back to Any.
  with TestPipeline() as p:
    pcoll = (
        p
        | beam.Create([(1, 1), (2, 2), (3, 3)])
        | beam.GroupByKey()
        | beam.ParDo(StatefulDoFn()))
    self.assertEqual(pcoll.element_type, typehints.Any)

  # An explicit with_output_types() hint is honored verbatim.
  with TestPipeline() as p:
    pcoll = (
        p
        | beam.Create([(1, 1), (2, 2), (3, 3)])
        | beam.GroupByKey()
        | beam.ParDo(StatefulDoFn()).with_output_types(str))
    self.assertEqual(pcoll.element_type, str)
def test_track_pcoll_unbounded(self):
  """An unbounded source marks every downstream PCollection unbounded."""
  p = TestPipeline()
  source = p | 'read' >> Read(FakeUnboundedSource())
  stage_one = source | 'do1' >> FlatMap(lambda x: [x + 1])
  stage_two = stage_one | 'do2' >> FlatMap(lambda x: [x + 1])
  # Boundedness must propagate through every transform in the chain.
  for pcoll in (source, stage_one, stage_two):
    self.assertIs(pcoll.is_bounded, False)
def test_track_pcoll_bounded(self):
  """A bounded Create keeps every downstream PCollection bounded."""
  p = TestPipeline()
  created = p | 'label1' >> Create([1, 2, 3])
  stage_one = created | 'do1' >> FlatMap(lambda x: [x + 1])
  stage_two = stage_one | 'do2' >> FlatMap(lambda x: [x + 1])
  # Boundedness must propagate through every transform in the chain.
  for pcoll in (created, stage_one, stage_two):
    self.assertIs(pcoll.is_bounded, True)
def test_track_pcoll_bounded_flatten(self):
  """Flattening only bounded inputs yields a bounded PCollection."""
  p = TestPipeline()
  branch_a = p | 'label_a' >> Create([1, 2, 3])
  mapped_a = branch_a | 'do_a' >> FlatMap(lambda x: [x + 1])
  branch_b = p | 'label_b' >> Create([1, 2, 3])
  mapped_b = branch_b | 'do_b' >> FlatMap(lambda x: [x + 1])
  merged = (mapped_a, mapped_b) | beam.Flatten()
  # Every input branch and the flattened result must stay bounded.
  for pcoll in (branch_a, mapped_a, branch_b, mapped_b, merged):
    self.assertIs(pcoll.is_bounded, True)
def test_track_pcoll_unbounded_flatten(self):
  """Flattening any unbounded input makes the result unbounded."""
  p = TestPipeline()
  bounded_src = p | 'label1' >> Create([1, 2, 3])
  bounded = bounded_src | 'do1' >> FlatMap(lambda x: [x + 1])
  unbounded_src = p | 'read' >> Read(FakeUnboundedSource())
  unbounded = unbounded_src | 'do2' >> FlatMap(lambda x: [x + 1])
  merged = (bounded, unbounded) | beam.Flatten()
  # Each branch keeps its own boundedness...
  self.assertIs(bounded_src.is_bounded, True)
  self.assertIs(bounded.is_bounded, True)
  self.assertIs(unbounded_src.is_bounded, False)
  self.assertIs(unbounded.is_bounded, False)
  # ...but a single unbounded input taints the flattened output.
  self.assertIs(merged.is_bounded, False)
def test_incompatible_submission_and_runtime_envs_fail_pipeline(self):
  """Pipeline fails when submission- and run-time SDK versions disagree."""
  with mock.patch(
      'apache_beam.transforms.environments.sdk_base_version_capability'
  ) as base_version:
    # Each successive call reports a different SDK version, so the version
    # captured at pipeline construction can never match the one observed
    # at run time.
    base_version.side_effect = [
        f"beam:version:sdk_base:apache/beam_python3.5_sdk:2.{i}.0"
        for i in range(100)
    ]
    with self.assertRaisesRegex(
        RuntimeError,
        'Pipeline construction environment and pipeline runtime '
        'environment are not compatible.'):
      with TestPipeline() as p:
        _ = p | Create([None])
class DoFnTest(unittest.TestCase):
  """Tests for DoFn parameter binding, side inputs, and DoFn.*Param values."""

  def test_element(self):
    """process() receives each element as its first positional argument."""
    class TestDoFn(DoFn):
      def process(self, element):
        yield element + 10

    with TestPipeline() as pipeline:
      pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())
      assert_that(pcoll, equal_to([11, 12]))

  def test_side_input_no_tag(self):
    """Side inputs bind to plain (untagged) process() parameters."""
    class TestDoFn(DoFn):
      def process(self, element, prefix, suffix):
        return ['%s-%s-%s' % (prefix, element, suffix)]

    with TestPipeline() as pipeline:
      words_list = ['aa', 'bb', 'cc']
      words = pipeline | 'SomeWords' >> Create(words_list)
      prefix = 'zyx'
      suffix = pipeline | 'SomeString' >> Create(['xyz'])  # side input
      result = words | 'DecorateWordsDoFnNoTag' >> ParDo(
          TestDoFn(), prefix, suffix=AsSingleton(suffix))
      assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))

  def test_side_input_tagged(self):
    """Side inputs bind to parameters declared with DoFn.SideInputParam."""
    class TestDoFn(DoFn):
      def process(self, element, prefix, suffix=DoFn.SideInputParam):
        return ['%s-%s-%s' % (prefix, element, suffix)]

    with TestPipeline() as pipeline:
      words_list = ['aa', 'bb', 'cc']
      words = pipeline | 'SomeWords' >> Create(words_list)
      prefix = 'zyx'
      suffix = pipeline | 'SomeString' >> Create(['xyz'])  # side input
      result = words | 'DecorateWordsDoFnNoTag' >> ParDo(
          TestDoFn(), prefix, suffix=AsSingleton(suffix))
      assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))

  @pytest.mark.it_validatesrunner
  def test_element_param(self):
    """DoFn.ElementParam as a lambda default binds the current element."""
    pipeline = TestPipeline()
    input = [1, 2]
    pcoll = (
        pipeline
        | 'Create' >> Create(input)
        | 'Ele param' >> Map(lambda element=DoFn.ElementParam: element))
    assert_that(pcoll, equal_to(input))
    pipeline.run()

  @pytest.mark.it_validatesrunner
  def test_key_param(self):
    """DoFn.KeyParam binds the key of a KV element."""
    pipeline = TestPipeline()
    pcoll = (
        pipeline
        | 'Create' >> Create([('a', 1), ('b', 2)])
        | 'Key param' >> Map(lambda _, key=DoFn.KeyParam: key))
    assert_that(pcoll, equal_to(['a', 'b']))
    pipeline.run()

  def test_window_param(self):
    """DoFn.WindowParam binds the element's current window."""
    class TestDoFn(DoFn):
      def process(self, element, window=DoFn.WindowParam):
        yield (element, (float(window.start), float(window.end)))

    with TestPipeline() as pipeline:
      pcoll = (
          pipeline
          | Create([1, 7])
          | Map(lambda x: TimestampedValue(x, x))
          | WindowInto(windowfn=SlidingWindows(10, 5))
          | ParDo(TestDoFn()))
      # Sliding windows of size 10 every 5: each element lands in two windows.
      assert_that(
          pcoll,
          equal_to([(1, (-5, 5)), (1, (0, 10)), (7, (0, 10)), (7, (5, 15))]))
      pcoll2 = pcoll | 'Again' >> ParDo(TestDoFn())
      # A second application observes the same windows, now paired twice.
      assert_that(
          pcoll2,
          equal_to([((1, (-5, 5)), (-5, 5)), ((1, (0, 10)), (0, 10)),
                    ((7, (0, 10)), (0, 10)), ((7, (5, 15)), (5, 15))]),
          label='doubled windows')

  def test_timestamp_param(self):
    """DoFn.TimestampParam defaults to MIN_TIMESTAMP for unstamped input."""
    class TestDoFn(DoFn):
      def process(self, element, timestamp=DoFn.TimestampParam):
        yield timestamp

    with TestPipeline() as pipeline:
      pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())
      assert_that(pcoll, equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))

  def test_timestamp_param_map(self):
    """DoFn.TimestampParam also works as a Map lambda default."""
    with TestPipeline() as p:
      assert_that(
          p | Create([1, 2]) | beam.Map(lambda _, t=DoFn.TimestampParam: t),
          equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))

  def test_pane_info_param(self):
    """DoFn.PaneInfoParam is unknown before grouping, concrete after."""
    with TestPipeline() as p:
      pc = p | Create([(None, None)])
      assert_that(
          pc | beam.Map(lambda _, p=DoFn.PaneInfoParam: p),
          equal_to([windowed_value.PANE_INFO_UNKNOWN]),
          label='CheckUngrouped')
      assert_that(
          pc | beam.GroupByKey() | beam.Map(lambda _, p=DoFn.PaneInfoParam: p),
          equal_to([
              windowed_value.PaneInfo(
                  is_first=True,
                  is_last=True,
                  timing=windowed_value.PaneInfoTiming.ON_TIME,
                  index=0,
                  nonspeculative_index=0)
          ]),
          label='CheckGrouped')

  def test_incomparable_default(self):
    """Default argument values are never compared or hashed by Beam."""
    class IncomparableType(object):
      def __eq__(self, other):
        raise RuntimeError()

      def __ne__(self, other):
        raise RuntimeError()

      def __hash__(self):
        raise RuntimeError()

    # Ensure that we don't use default values in a context where they must be
    # comparable (see BEAM-8301).
    with TestPipeline() as pipeline:
      pcoll = (
          pipeline
          | beam.Create([None])
          | Map(lambda e, x=IncomparableType(): (e, type(x).__name__)))
      assert_that(pcoll, equal_to([(None, 'IncomparableType')]))
class Bacon(PipelineOptions):
  """Test options subclass declaring an integer --slices flag."""
  @classmethod
  def _add_argparse_args(cls, parser):
    parser.add_argument('--slices', type=int)
class Eggs(PipelineOptions):
  """Test options subclass declaring a --style flag with a default."""
  @classmethod
  def _add_argparse_args(cls, parser):
    parser.add_argument('--style', default='scrambled')
class Breakfast(Bacon, Eggs):
  """Combined options: inherits both the --slices and --style flags."""
  pass
class PipelineOptionsTest(unittest.TestCase):
  """Tests for PipelineOptions flag parsing, attribute access, and views."""

  def test_flag_parsing(self):
    """Known flags are parsed; unrecognized flags are silently ignored."""
    opts = Breakfast(['--slices=3', '--style=sunny side up', '--ignored'])
    self.assertEqual(3, opts.slices)
    self.assertEqual('sunny side up', opts.style)

  def test_keyword_parsing(self):
    """Keyword arguments win over values parsed from flags."""
    opts = Breakfast(['--slices=3', '--style=sunny side up', '--ignored'],
                     slices=10)
    self.assertEqual(10, opts.slices)
    self.assertEqual('sunny side up', opts.style)

  def test_attribute_setting(self):
    """Option attributes can be read back and reassigned."""
    opts = Breakfast(slices=10)
    self.assertEqual(10, opts.slices)
    opts.slices = 20
    self.assertEqual(20, opts.slices)

  def test_view_as(self):
    """Views over the same options share state; unknown attrs raise."""
    generic = PipelineOptions(['--slices=3'])
    self.assertEqual(3, generic.view_as(Bacon).slices)
    self.assertEqual(3, generic.view_as(Breakfast).slices)
    # A write through one view is visible through every other view.
    generic.view_as(Breakfast).slices = 10
    self.assertEqual(10, generic.view_as(Bacon).slices)
    # Attributes not declared by the viewed class are inaccessible.
    with self.assertRaises(AttributeError):
      generic.slices  # pylint: disable=pointless-statement
    with self.assertRaises(AttributeError):
      generic.view_as(Eggs).slices  # pylint: disable=expression-not-assigned

  def test_defaults(self):
    """Unspecified options fall back to their declared defaults."""
    opts = Breakfast(['--slices=3'])
    self.assertEqual(3, opts.slices)
    self.assertEqual('scrambled', opts.style)

  def test_dir(self):
    """dir() exposes only the public option API plus declared flags."""
    opts = Breakfast()

    def public_attrs(obj):
      # Mirror the original filter: drop private names and the legacy
      # 'next' attribute.
      return {
          attr
          for attr in dir(obj) if not attr.startswith('_') and attr != 'next'
      }

    self.assertEqual(
        {
            'from_dictionary',
            'get_all_options',
            'slices',
            'style',
            'view_as',
            'display_data'
        },
        public_attrs(opts))
    # A narrower view exposes only its own declared flags.
    self.assertEqual(
        {
            'from_dictionary',
            'get_all_options',
            'style',
            'view_as',
            'display_data'
        },
        public_attrs(opts.view_as(Eggs)))
class RunnerApiTest(unittest.TestCase):
  """Tests for round-tripping pipelines through the Runner API protos."""

  def test_parent_pointer(self):
    """Deserialized transforms keep a correct parent back-pointer."""
    class MyPTransform(beam.PTransform):
      def expand(self, p):
        self.p = p
        return p | beam.Create([None])

    p = beam.Pipeline()
    p | MyPTransform()  # pylint: disable=expression-not-assigned
    p = Pipeline.from_runner_api(
        Pipeline.to_runner_api(p, use_fake_coders=True), None, None)
    self.assertIsNotNone(p.transforms_stack[0].parts[0].parent)
    self.assertEqual(
        p.transforms_stack[0].parts[0].parent, p.transforms_stack[0])

  def test_requirements(self):
    """Using BundleFinalizerParam adds the bundle-finalization requirement."""
    p = beam.Pipeline()
    _ = (
        p | beam.Create([])
        | beam.ParDo(lambda x, finalize=beam.DoFn.BundleFinalizerParam: None))
    proto = p.to_runner_api()
    # Fixed: the original assertTrue(urn, proto.requirements) treated the
    # second argument as a failure message and always passed (the urn
    # string is truthy). assertIn actually checks membership.
    self.assertIn(
        common_urns.requirements.REQUIRES_BUNDLE_FINALIZATION.urn,
        proto.requirements)

  def test_annotations(self):
    """Transform annotations survive serialization (str coerced to bytes)."""
    some_proto = BytesCoder().to_runner_api(None)

    class EmptyTransform(beam.PTransform):
      def expand(self, pcoll):
        return pcoll

      def annotations(self):
        return {'foo': 'some_string'}

    class NonEmptyTransform(beam.PTransform):
      def expand(self, pcoll):
        return pcoll | beam.Map(lambda x: x)

      def annotations(self):
        return {
            'foo': b'some_bytes',
            'proto': some_proto,
        }

    p = beam.Pipeline()
    _ = p | beam.Create([]) | EmptyTransform() | NonEmptyTransform()
    proto = p.to_runner_api()
    seen = 0
    for transform in proto.components.transforms.values():
      if transform.unique_name == 'EmptyTransform':
        seen += 1
        self.assertEqual(transform.annotations['foo'], b'some_string')
      elif transform.unique_name == 'NonEmptyTransform':
        seen += 1
        self.assertEqual(transform.annotations['foo'], b'some_bytes')
        self.assertEqual(
            transform.annotations['proto'], some_proto.SerializeToString())
    # Both transforms must have been present in the serialized pipeline.
    self.assertEqual(seen, 2)

  def test_transform_ids(self):
    """Serialized transform ids contain only safe identifier characters."""
    class MyPTransform(beam.PTransform):
      def expand(self, p):
        self.p = p
        return p | beam.Create([None])

    p = beam.Pipeline()
    p | MyPTransform()  # pylint: disable=expression-not-assigned
    runner_api_proto = Pipeline.to_runner_api(p)
    for transform_id in runner_api_proto.components.transforms:
      self.assertRegex(transform_id, r'[a-zA-Z0-9-_]+')

  def test_input_names(self):
    """Dict-keyed inputs keep their user-supplied names in the proto."""
    class MyPTransform(beam.PTransform):
      def expand(self, pcolls):
        return pcolls.values() | beam.Flatten()

    p = beam.Pipeline()
    input_names = set('ABC')
    inputs = {x: p | x >> beam.Create([x]) for x in input_names}
    inputs | MyPTransform()  # pylint: disable=expression-not-assigned
    runner_api_proto = Pipeline.to_runner_api(p)
    for transform_proto in runner_api_proto.components.transforms.values():
      if transform_proto.unique_name == 'MyPTransform':
        self.assertEqual(set(transform_proto.inputs.keys()), input_names)
        break
    else:
      self.fail('Unable to find transform.')

  def test_display_data(self):
    """display_data() items (incl. inherited ones) serialize in order."""
    class MyParentTransform(beam.PTransform):
      def expand(self, p):
        self.p = p
        return p | beam.Create([None])

      def display_data(self):  # type: () -> dict
        parent_dd = super().display_data()
        parent_dd['p_dd_string'] = DisplayDataItem(
            'p_dd_string_value', label='p_dd_string_label')
        parent_dd['p_dd_string_2'] = DisplayDataItem('p_dd_string_value_2')
        parent_dd['p_dd_bool'] = DisplayDataItem(True, label='p_dd_bool_label')
        parent_dd['p_dd_int'] = DisplayDataItem(1, label='p_dd_int_label')
        return parent_dd

    class MyPTransform(MyParentTransform):
      def expand(self, p):
        self.p = p
        return p | beam.Create([None])

      def display_data(self):  # type: () -> dict
        parent_dd = super().display_data()
        parent_dd['dd_string'] = DisplayDataItem(
            'dd_string_value', label='dd_string_label')
        parent_dd['dd_string_2'] = DisplayDataItem('dd_string_value_2')
        parent_dd['dd_bool'] = DisplayDataItem(False, label='dd_bool_label')
        parent_dd['dd_double'] = DisplayDataItem(1.1, label='dd_double_label')
        return parent_dd

    p = beam.Pipeline()
    p | MyPTransform()  # pylint: disable=expression-not-assigned
    proto_pipeline = Pipeline.to_runner_api(p, use_fake_coders=True)
    my_transform, = [
        transform
        for transform in proto_pipeline.components.transforms.values()
        if transform.unique_name == 'MyPTransform'
    ]
    self.assertIsNotNone(my_transform)
    # Parent items come first, then the subclass's; an item without an
    # explicit label falls back to its key.
    self.assertListEqual(
        list(my_transform.display_data),
        [
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='p_dd_string_label',
                    key='p_dd_string',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    string_value='p_dd_string_value').SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='p_dd_string_2',
                    key='p_dd_string_2',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    string_value='p_dd_string_value_2').SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='p_dd_bool_label',
                    key='p_dd_bool',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    bool_value=True).SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='p_dd_int_label',
                    key='p_dd_int',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    int_value=1).SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='dd_string_label',
                    key='dd_string',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    string_value='dd_string_value').SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='dd_string_2',
                    key='dd_string_2',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    string_value='dd_string_value_2').SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='dd_bool_label',
                    key='dd_bool',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    bool_value=False).SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='dd_double_label',
                    key='dd_double',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    double_value=1.1).SerializeToString()),
        ])

  def test_runner_api_roundtrip_preserves_resource_hints(self):
    """Resource hints survive repeated Runner API round-trips."""
    p = beam.Pipeline()
    _ = (
        p | beam.Create([1, 2])
        | beam.Map(lambda x: x + 1).with_resource_hints(accelerator='gpu'))
    self.assertEqual(
        p.transforms_stack[0].parts[1].transform.get_resource_hints(),
        {common_urns.resource_hints.ACCELERATOR.urn: b'gpu'})
    for _ in range(3):
      # Verify that DEFAULT environments are recreated during multiple
      # RunnerAPI translation and hints don't get lost.
      p = Pipeline.from_runner_api(Pipeline.to_runner_api(p), None, None)
      self.assertEqual(
          p.transforms_stack[0].parts[1].transform.get_resource_hints(),
          {common_urns.resource_hints.ACCELERATOR.urn: b'gpu'})

  def test_hints_on_composite_transforms_are_propagated_to_subtransforms(self):
    """Hints merge outer-to-inner: subtransform > composite > options."""
    class FooHint(ResourceHint):
      urn = 'foo_urn'

    class BarHint(ResourceHint):
      urn = 'bar_urn'

    class BazHint(ResourceHint):
      urn = 'baz_urn'

    class QuxHint(ResourceHint):
      urn = 'qux_urn'

    class UseMaxValueHint(ResourceHint):
      urn = 'use_max_value_urn'

      @classmethod
      def get_merged_value(
          cls, outer_value, inner_value):  # type: (bytes, bytes) -> bytes
        return ResourceHint._use_max(outer_value, inner_value)

    ResourceHint.register_resource_hint('foo_hint', FooHint)
    ResourceHint.register_resource_hint('bar_hint', BarHint)
    ResourceHint.register_resource_hint('baz_hint', BazHint)
    ResourceHint.register_resource_hint('qux_hint', QuxHint)
    ResourceHint.register_resource_hint('use_max_value_hint', UseMaxValueHint)

    @beam.ptransform_fn
    def SubTransform(pcoll):
      return pcoll | beam.Map(lambda x: x + 1).with_resource_hints(
          foo_hint='set_on_subtransform', use_max_value_hint='10')

    @beam.ptransform_fn
    def CompositeTransform(pcoll):
      return pcoll | beam.Map(lambda x: x * 2) | SubTransform()

    p = beam.Pipeline()
    _ = (
        p | beam.Create([1, 2])
        | CompositeTransform().with_resource_hints(
            foo_hint='should_be_overriden_by_subtransform',
            bar_hint='set_on_composite',
            baz_hint='set_on_composite',
            use_max_value_hint='100'))
    options = PortableOptions([
        '--resource_hint=baz_hint=should_be_overriden_by_composite',
        '--resource_hint=qux_hint=set_via_options',
        '--environment_type=PROCESS',
        '--environment_option=process_command=foo',
        '--sdk_location=container',
    ])
    environment = ProcessEnvironment.from_options(options)
    proto = Pipeline.to_runner_api(p, default_environment=environment)
    # Fixed: initialize `found` before the loop. Previously it was only
    # assigned inside the matching branch, so a missing transform made the
    # final `assert found` raise NameError instead of failing cleanly.
    found = False
    for t in proto.components.transforms.values():
      if "CompositeTransform/SubTransform/Map" in t.unique_name:
        environment = proto.components.environments.get(t.environment_id)
        self.assertEqual(
            environment.resource_hints.get('foo_urn'), b'set_on_subtransform')
        self.assertEqual(
            environment.resource_hints.get('bar_urn'), b'set_on_composite')
        self.assertEqual(
            environment.resource_hints.get('baz_urn'), b'set_on_composite')
        self.assertEqual(
            environment.resource_hints.get('qux_urn'), b'set_via_options')
        self.assertEqual(
            environment.resource_hints.get('use_max_value_urn'), b'100')
        found = True
    assert found

  def test_environments_with_same_resource_hints_are_reused(self):
    """Transforms with identical hint sets share one environment."""
    class HintX(ResourceHint):
      urn = 'X_urn'

    class HintY(ResourceHint):
      urn = 'Y_urn'

    class HintIsOdd(ResourceHint):
      urn = 'IsOdd_urn'

    ResourceHint.register_resource_hint('X', HintX)
    ResourceHint.register_resource_hint('Y', HintY)
    ResourceHint.register_resource_hint('IsOdd', HintIsOdd)

    p = beam.Pipeline()
    num_iter = 4
    for i in range(num_iter):
      _ = (
          p
          | f'NoHintCreate_{i}' >> beam.Create([1, 2])
          | f'NoHint_{i}' >> beam.Map(lambda x: x + 1))
      _ = (
          p
          | f'XCreate_{i}' >> beam.Create([1, 2])
          |
          f'HintX_{i}' >> beam.Map(lambda x: x + 1).with_resource_hints(X='X'))
      _ = (
          p
          | f'XYCreate_{i}' >> beam.Create([1, 2])
          | f'HintXY_{i}' >> beam.Map(lambda x: x + 1).with_resource_hints(
              X='X', Y='Y'))
      _ = (
          p
          | f'IsOddCreate_{i}' >> beam.Create([1, 2])
          | f'IsOdd_{i}' >>
          beam.Map(lambda x: x + 1).with_resource_hints(IsOdd=str(i % 2 != 0)))
    proto = Pipeline.to_runner_api(p)
    count_x = count_xy = count_is_odd = count_no_hints = 0
    env_ids = set()
    for _, t in proto.components.transforms.items():
      env = proto.components.environments[t.environment_id]
      if t.unique_name.startswith('HintX_'):
        count_x += 1
        env_ids.add(t.environment_id)
        self.assertEqual(env.resource_hints, {'X_urn': b'X'})

      if t.unique_name.startswith('HintXY_'):
        count_xy += 1
        env_ids.add(t.environment_id)
        self.assertEqual(env.resource_hints, {'X_urn': b'X', 'Y_urn': b'Y'})

      if t.unique_name.startswith('NoHint_'):
        count_no_hints += 1
        env_ids.add(t.environment_id)
        self.assertEqual(env.resource_hints, {})

      if t.unique_name.startswith('IsOdd_'):
        count_is_odd += 1
        env_ids.add(t.environment_id)
        self.assertTrue(
            env.resource_hints == {'IsOdd_urn': b'True'} or
            env.resource_hints == {'IsOdd_urn': b'False'})
    assert count_x == count_is_odd == count_xy == count_no_hints == num_iter
    assert num_iter > 1
    # Expect exactly 5 environments: no-hints, X, XY, IsOdd=True, IsOdd=False.
    self.assertEqual(len(env_ids), 5)

  def test_multiple_application_of_the_same_transform_set_different_hints(self):
    """Each application of one transform instance may carry its own hints."""
    class FooHint(ResourceHint):
      urn = 'foo_urn'

    class UseMaxValueHint(ResourceHint):
      urn = 'use_max_value_urn'

      @classmethod
      def get_merged_value(
          cls, outer_value, inner_value):  # type: (bytes, bytes) -> bytes
        return ResourceHint._use_max(outer_value, inner_value)

    ResourceHint.register_resource_hint('foo_hint', FooHint)
    ResourceHint.register_resource_hint('use_max_value_hint', UseMaxValueHint)

    @beam.ptransform_fn
    def SubTransform(pcoll):
      return pcoll | beam.Map(lambda x: x + 1)

    @beam.ptransform_fn
    def CompositeTransform(pcoll):
      # The same transform object is applied twice with different hints.
      sub = SubTransform()
      return (
          pcoll
          | 'first' >> sub.with_resource_hints(foo_hint='first_application')
          | 'second' >> sub.with_resource_hints(foo_hint='second_application'))

    p = beam.Pipeline()
    _ = (p | beam.Create([1, 2]) | CompositeTransform())
    proto = Pipeline.to_runner_api(p)
    count = 0
    for t in proto.components.transforms.values():
      if "CompositeTransform/first/Map" in t.unique_name:
        environment = proto.components.environments.get(t.environment_id)
        self.assertEqual(
            b'first_application', environment.resource_hints.get('foo_urn'))
        count += 1
      if "CompositeTransform/second/Map" in t.unique_name:
        environment = proto.components.environments.get(t.environment_id)
        self.assertEqual(
            b'second_application', environment.resource_hints.get('foo_urn'))
        count += 1
    assert count == 2

  def test_environments_are_deduplicated(self):
    """Environments equal up to artifact hash/destination are merged."""
    def file_artifact(path, hash, staged_name):
      # Build a FILE artifact staged under `staged_name` with the given
      # content hash; `path` differs so only hash+destination decide
      # equivalence.
      return beam_runner_api_pb2.ArtifactInformation(
          type_urn=common_urns.artifact_types.FILE.urn,
          type_payload=beam_runner_api_pb2.ArtifactFilePayload(
              path=path, sha256=hash).SerializeToString(),
          role_urn=common_urns.artifact_roles.STAGING_TO.urn,
          role_payload=beam_runner_api_pb2.ArtifactStagingToRolePayload(
              staged_name=staged_name).SerializeToString(),
      )

    proto = beam_runner_api_pb2.Pipeline(
        components=beam_runner_api_pb2.Components(
            transforms={
                f'transform{ix}': beam_runner_api_pb2.PTransform(
                    environment_id=f'e{ix}')
                for ix in range(8)
            },
            environments={
                # Same hash and destination.
                'e1': beam_runner_api_pb2.Environment(
                    dependencies=[file_artifact('a1', 'x', 'dest')]),
                'e2': beam_runner_api_pb2.Environment(
                    dependencies=[file_artifact('a2', 'x', 'dest')]),
                # Different hash.
                'e3': beam_runner_api_pb2.Environment(
                    dependencies=[file_artifact('a3', 'y', 'dest')]),
                # Different destination.
                'e4': beam_runner_api_pb2.Environment(
                    dependencies=[file_artifact('a4', 'y', 'dest2')]),
                # Multiple files with same hash and destinations.
                'e5': beam_runner_api_pb2.Environment(
                    dependencies=[
                        file_artifact('a1', 'x', 'dest'),
                        file_artifact('b1', 'xb', 'destB')
                    ]),
                'e6': beam_runner_api_pb2.Environment(
                    dependencies=[
                        file_artifact('a2', 'x', 'dest'),
                        file_artifact('b2', 'xb', 'destB')
                    ]),
                # Overlapping, but not identical, files.
                'e7': beam_runner_api_pb2.Environment(
                    dependencies=[
                        file_artifact('a1', 'x', 'dest'),
                        file_artifact('b2', 'y', 'destB')
                    ]),
                # Same files as first, but differing other properties.
                'e0': beam_runner_api_pb2.Environment(
                    resource_hints={'hint': b'value'},
                    dependencies=[file_artifact('a1', 'x', 'dest')]),
            }))
    Pipeline.merge_compatible_environments(proto)

    # These environments are equivalent.
    self.assertEqual(
        proto.components.transforms['transform1'].environment_id,
        proto.components.transforms['transform2'].environment_id)
    self.assertEqual(
        proto.components.transforms['transform5'].environment_id,
        proto.components.transforms['transform6'].environment_id)

    # These are not.
    self.assertNotEqual(
        proto.components.transforms['transform1'].environment_id,
        proto.components.transforms['transform3'].environment_id)
    self.assertNotEqual(
        proto.components.transforms['transform4'].environment_id,
        proto.components.transforms['transform3'].environment_id)
    self.assertNotEqual(
        proto.components.transforms['transform6'].environment_id,
        proto.components.transforms['transform7'].environment_id)
    self.assertNotEqual(
        proto.components.transforms['transform1'].environment_id,
        proto.components.transforms['transform0'].environment_id)

    # 8 originals collapse into 6 distinct environments.
    self.assertEqual(len(proto.components.environments), 6)
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
|
normal
|
{
"blob_id": "edc7c74a19a272bdd6da81b3ce2d214a2b613984",
"index": 5835,
"step-1": "<mask token>\n\n\nclass PipelineTest(unittest.TestCase):\n <mask token>\n\n\n class CustomTransform(PTransform):\n\n def expand(self, pcoll):\n return pcoll | '+1' >> FlatMap(lambda x: [x + 1])\n\n\n class Visitor(PipelineVisitor):\n\n def __init__(self, visited):\n self.visited = visited\n self.enter_composite = []\n self.leave_composite = []\n\n def visit_value(self, value, _):\n self.visited.append(value)\n\n def enter_composite_transform(self, transform_node):\n self.enter_composite.append(transform_node)\n\n def leave_composite_transform(self, transform_node):\n self.leave_composite.append(transform_node)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_pipeline_as_context(self):\n\n def raise_exception(exn):\n raise exn\n with self.assertRaises(ValueError):\n with Pipeline() as p:\n p | Create([ValueError('msg')]) | Map(raise_exception)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DoFnTest(unittest.TestCase):\n\n def test_element(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element):\n yield element + 10\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(\n TestDoFn())\n assert_that(pcoll, equal_to([11, 12]))\n\n def test_side_input_no_tag(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, prefix, suffix):\n return ['%s-%s-%s' % (prefix, element, suffix)]\n with TestPipeline() as pipeline:\n words_list = ['aa', 'bb', 'cc']\n words = pipeline | 'SomeWords' >> Create(words_list)\n prefix = 'zyx'\n suffix = pipeline | 'SomeString' >> Create(['xyz'])\n result = words | 'DecorateWordsDoFnNoTag' >> ParDo(TestDoFn(),\n prefix, suffix=AsSingleton(suffix))\n assert_that(result, 
equal_to([('zyx-%s-xyz' % x) for x in\n words_list]))\n\n def test_side_input_tagged(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, prefix, suffix=DoFn.SideInputParam):\n return ['%s-%s-%s' % (prefix, element, suffix)]\n with TestPipeline() as pipeline:\n words_list = ['aa', 'bb', 'cc']\n words = pipeline | 'SomeWords' >> Create(words_list)\n prefix = 'zyx'\n suffix = pipeline | 'SomeString' >> Create(['xyz'])\n result = words | 'DecorateWordsDoFnNoTag' >> ParDo(TestDoFn(),\n prefix, suffix=AsSingleton(suffix))\n assert_that(result, equal_to([('zyx-%s-xyz' % x) for x in\n words_list]))\n\n @pytest.mark.it_validatesrunner\n def test_element_param(self):\n pipeline = TestPipeline()\n input = [1, 2]\n pcoll = pipeline | 'Create' >> Create(input) | 'Ele param' >> Map(\n lambda element=DoFn.ElementParam: element)\n assert_that(pcoll, equal_to(input))\n pipeline.run()\n\n @pytest.mark.it_validatesrunner\n def test_key_param(self):\n pipeline = TestPipeline()\n pcoll = pipeline | 'Create' >> Create([('a', 1), ('b', 2)]\n ) | 'Key param' >> Map(lambda _, key=DoFn.KeyParam: key)\n assert_that(pcoll, equal_to(['a', 'b']))\n pipeline.run()\n\n def test_window_param(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, window=DoFn.WindowParam):\n yield element, (float(window.start), float(window.end))\n with TestPipeline() as pipeline:\n pcoll = pipeline | Create([1, 7]) | Map(lambda x:\n TimestampedValue(x, x)) | WindowInto(windowfn=\n SlidingWindows(10, 5)) | ParDo(TestDoFn())\n assert_that(pcoll, equal_to([(1, (-5, 5)), (1, (0, 10)), (7, (0,\n 10)), (7, (5, 15))]))\n pcoll2 = pcoll | 'Again' >> ParDo(TestDoFn())\n assert_that(pcoll2, equal_to([((1, (-5, 5)), (-5, 5)), ((1, (0,\n 10)), (0, 10)), ((7, (0, 10)), (0, 10)), ((7, (5, 15)), (5,\n 15))]), label='doubled windows')\n\n def test_timestamp_param(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, timestamp=DoFn.TimestampParam):\n yield timestamp\n with TestPipeline() as 
pipeline:\n pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(\n TestDoFn())\n assert_that(pcoll, equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))\n\n def test_timestamp_param_map(self):\n with TestPipeline() as p:\n assert_that(p | Create([1, 2]) | beam.Map(lambda _, t=DoFn.\n TimestampParam: t), equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))\n\n def test_pane_info_param(self):\n with TestPipeline() as p:\n pc = p | Create([(None, None)])\n assert_that(pc | beam.Map(lambda _, p=DoFn.PaneInfoParam: p),\n equal_to([windowed_value.PANE_INFO_UNKNOWN]), label=\n 'CheckUngrouped')\n assert_that(pc | beam.GroupByKey() | beam.Map(lambda _, p=DoFn.\n PaneInfoParam: p), equal_to([windowed_value.PaneInfo(\n is_first=True, is_last=True, timing=windowed_value.\n PaneInfoTiming.ON_TIME, index=0, nonspeculative_index=0)]),\n label='CheckGrouped')\n\n def test_incomparable_default(self):\n\n\n class IncomparableType(object):\n\n def __eq__(self, other):\n raise RuntimeError()\n\n def __ne__(self, other):\n raise RuntimeError()\n\n def __hash__(self):\n raise RuntimeError()\n with TestPipeline() as pipeline:\n pcoll = pipeline | beam.Create([None]) | Map(lambda e, x=\n IncomparableType(): (e, type(x).__name__))\n assert_that(pcoll, equal_to([(None, 'IncomparableType')]))\n\n\nclass Bacon(PipelineOptions):\n\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_argument('--slices', type=int)\n\n\nclass Eggs(PipelineOptions):\n\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_argument('--style', default='scrambled')\n\n\nclass Breakfast(Bacon, Eggs):\n pass\n\n\nclass PipelineOptionsTest(unittest.TestCase):\n\n def test_flag_parsing(self):\n options = Breakfast(['--slices=3', '--style=sunny side up',\n '--ignored'])\n self.assertEqual(3, options.slices)\n self.assertEqual('sunny side up', options.style)\n\n def test_keyword_parsing(self):\n options = Breakfast(['--slices=3', '--style=sunny side up',\n '--ignored'], slices=10)\n self.assertEqual(10, 
options.slices)\n self.assertEqual('sunny side up', options.style)\n\n def test_attribute_setting(self):\n options = Breakfast(slices=10)\n self.assertEqual(10, options.slices)\n options.slices = 20\n self.assertEqual(20, options.slices)\n\n def test_view_as(self):\n generic_options = PipelineOptions(['--slices=3'])\n self.assertEqual(3, generic_options.view_as(Bacon).slices)\n self.assertEqual(3, generic_options.view_as(Breakfast).slices)\n generic_options.view_as(Breakfast).slices = 10\n self.assertEqual(10, generic_options.view_as(Bacon).slices)\n with self.assertRaises(AttributeError):\n generic_options.slices\n with self.assertRaises(AttributeError):\n generic_options.view_as(Eggs).slices\n\n def test_defaults(self):\n options = Breakfast(['--slices=3'])\n self.assertEqual(3, options.slices)\n self.assertEqual('scrambled', options.style)\n\n def test_dir(self):\n options = Breakfast()\n self.assertEqual({'from_dictionary', 'get_all_options', 'slices',\n 'style', 'view_as', 'display_data'}, {attr for attr in dir(\n options) if not attr.startswith('_') and attr != 'next'})\n self.assertEqual({'from_dictionary', 'get_all_options', 'style',\n 'view_as', 'display_data'}, {attr for attr in dir(options.\n view_as(Eggs)) if not attr.startswith('_') and attr != 'next'})\n\n\nclass RunnerApiTest(unittest.TestCase):\n\n def test_parent_pointer(self):\n\n\n class MyPTransform(beam.PTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n p = beam.Pipeline()\n p | MyPTransform()\n p = Pipeline.from_runner_api(Pipeline.to_runner_api(p,\n use_fake_coders=True), None, None)\n self.assertIsNotNone(p.transforms_stack[0].parts[0].parent)\n self.assertEqual(p.transforms_stack[0].parts[0].parent, p.\n transforms_stack[0])\n\n def test_requirements(self):\n p = beam.Pipeline()\n _ = p | beam.Create([]) | beam.ParDo(lambda x, finalize=beam.DoFn.\n BundleFinalizerParam: None)\n proto = p.to_runner_api()\n self.assertTrue(common_urns.requirements.\n 
REQUIRES_BUNDLE_FINALIZATION.urn, proto.requirements)\n\n def test_annotations(self):\n some_proto = BytesCoder().to_runner_api(None)\n\n\n class EmptyTransform(beam.PTransform):\n\n def expand(self, pcoll):\n return pcoll\n\n def annotations(self):\n return {'foo': 'some_string'}\n\n\n class NonEmptyTransform(beam.PTransform):\n\n def expand(self, pcoll):\n return pcoll | beam.Map(lambda x: x)\n\n def annotations(self):\n return {'foo': b'some_bytes', 'proto': some_proto}\n p = beam.Pipeline()\n _ = p | beam.Create([]) | EmptyTransform() | NonEmptyTransform()\n proto = p.to_runner_api()\n seen = 0\n for transform in proto.components.transforms.values():\n if transform.unique_name == 'EmptyTransform':\n seen += 1\n self.assertEqual(transform.annotations['foo'], b'some_string')\n elif transform.unique_name == 'NonEmptyTransform':\n seen += 1\n self.assertEqual(transform.annotations['foo'], b'some_bytes')\n self.assertEqual(transform.annotations['proto'], some_proto\n .SerializeToString())\n self.assertEqual(seen, 2)\n\n def test_transform_ids(self):\n\n\n class MyPTransform(beam.PTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n p = beam.Pipeline()\n p | MyPTransform()\n runner_api_proto = Pipeline.to_runner_api(p)\n for transform_id in runner_api_proto.components.transforms:\n self.assertRegex(transform_id, '[a-zA-Z0-9-_]+')\n\n def test_input_names(self):\n\n\n class MyPTransform(beam.PTransform):\n\n def expand(self, pcolls):\n return pcolls.values() | beam.Flatten()\n p = beam.Pipeline()\n input_names = set('ABC')\n inputs = {x: (p | x >> beam.Create([x])) for x in input_names}\n inputs | MyPTransform()\n runner_api_proto = Pipeline.to_runner_api(p)\n for transform_proto in runner_api_proto.components.transforms.values():\n if transform_proto.unique_name == 'MyPTransform':\n self.assertEqual(set(transform_proto.inputs.keys()),\n input_names)\n break\n else:\n self.fail('Unable to find transform.')\n\n def 
test_display_data(self):\n\n\n class MyParentTransform(beam.PTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n\n def display_data(self):\n parent_dd = super().display_data()\n parent_dd['p_dd_string'] = DisplayDataItem('p_dd_string_value',\n label='p_dd_string_label')\n parent_dd['p_dd_string_2'] = DisplayDataItem(\n 'p_dd_string_value_2')\n parent_dd['p_dd_bool'] = DisplayDataItem(True, label=\n 'p_dd_bool_label')\n parent_dd['p_dd_int'] = DisplayDataItem(1, label=\n 'p_dd_int_label')\n return parent_dd\n\n\n class MyPTransform(MyParentTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n\n def display_data(self):\n parent_dd = super().display_data()\n parent_dd['dd_string'] = DisplayDataItem('dd_string_value',\n label='dd_string_label')\n parent_dd['dd_string_2'] = DisplayDataItem('dd_string_value_2')\n parent_dd['dd_bool'] = DisplayDataItem(False, label=\n 'dd_bool_label')\n parent_dd['dd_double'] = DisplayDataItem(1.1, label=\n 'dd_double_label')\n return parent_dd\n p = beam.Pipeline()\n p | MyPTransform()\n proto_pipeline = Pipeline.to_runner_api(p, use_fake_coders=True)\n my_transform, = [transform for transform in proto_pipeline.\n components.transforms.values() if transform.unique_name ==\n 'MyPTransform']\n self.assertIsNotNone(my_transform)\n self.assertListEqual(list(my_transform.display_data), [\n beam_runner_api_pb2.DisplayData(urn=common_urns.\n StandardDisplayData.DisplayData.LABELLED.urn, payload=\n beam_runner_api_pb2.LabelledPayload(label='p_dd_string_label',\n key='p_dd_string', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 'p_dd_string_value').SerializeToString()), beam_runner_api_pb2.\n DisplayData(urn=common_urns.StandardDisplayData.DisplayData.\n LABELLED.urn, payload=beam_runner_api_pb2.LabelledPayload(label\n ='p_dd_string_2', key='p_dd_string_2', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 
'p_dd_string_value_2').SerializeToString()),\n beam_runner_api_pb2.DisplayData(urn=common_urns.\n StandardDisplayData.DisplayData.LABELLED.urn, payload=\n beam_runner_api_pb2.LabelledPayload(label='p_dd_bool_label',\n key='p_dd_bool', namespace=\n 'apache_beam.pipeline_test.MyPTransform', bool_value=True).\n SerializeToString()), beam_runner_api_pb2.DisplayData(urn=\n common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(label=\n 'p_dd_int_label', key='p_dd_int', namespace=\n 'apache_beam.pipeline_test.MyPTransform', int_value=1).\n SerializeToString()), beam_runner_api_pb2.DisplayData(urn=\n common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(label=\n 'dd_string_label', key='dd_string', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 'dd_string_value').SerializeToString()), beam_runner_api_pb2.\n DisplayData(urn=common_urns.StandardDisplayData.DisplayData.\n LABELLED.urn, payload=beam_runner_api_pb2.LabelledPayload(label\n ='dd_string_2', key='dd_string_2', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 'dd_string_value_2').SerializeToString()), beam_runner_api_pb2.\n DisplayData(urn=common_urns.StandardDisplayData.DisplayData.\n LABELLED.urn, payload=beam_runner_api_pb2.LabelledPayload(label\n ='dd_bool_label', key='dd_bool', namespace=\n 'apache_beam.pipeline_test.MyPTransform', bool_value=False).\n SerializeToString()), beam_runner_api_pb2.DisplayData(urn=\n common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(label=\n 'dd_double_label', key='dd_double', namespace=\n 'apache_beam.pipeline_test.MyPTransform', double_value=1.1).\n SerializeToString())])\n\n def test_runner_api_roundtrip_preserves_resource_hints(self):\n p = beam.Pipeline()\n _ = p | beam.Create([1, 2]) | beam.Map(lambda x: x + 1\n ).with_resource_hints(accelerator='gpu')\n 
self.assertEqual(p.transforms_stack[0].parts[1].transform.\n get_resource_hints(), {common_urns.resource_hints.ACCELERATOR.\n urn: b'gpu'})\n for _ in range(3):\n p = Pipeline.from_runner_api(Pipeline.to_runner_api(p), None, None)\n self.assertEqual(p.transforms_stack[0].parts[1].transform.\n get_resource_hints(), {common_urns.resource_hints.\n ACCELERATOR.urn: b'gpu'})\n\n def test_hints_on_composite_transforms_are_propagated_to_subtransforms(self\n ):\n\n\n class FooHint(ResourceHint):\n urn = 'foo_urn'\n\n\n class BarHint(ResourceHint):\n urn = 'bar_urn'\n\n\n class BazHint(ResourceHint):\n urn = 'baz_urn'\n\n\n class QuxHint(ResourceHint):\n urn = 'qux_urn'\n\n\n class UseMaxValueHint(ResourceHint):\n urn = 'use_max_value_urn'\n\n @classmethod\n def get_merged_value(cls, outer_value, inner_value):\n return ResourceHint._use_max(outer_value, inner_value)\n ResourceHint.register_resource_hint('foo_hint', FooHint)\n ResourceHint.register_resource_hint('bar_hint', BarHint)\n ResourceHint.register_resource_hint('baz_hint', BazHint)\n ResourceHint.register_resource_hint('qux_hint', QuxHint)\n ResourceHint.register_resource_hint('use_max_value_hint',\n UseMaxValueHint)\n\n @beam.ptransform_fn\n def SubTransform(pcoll):\n return pcoll | beam.Map(lambda x: x + 1).with_resource_hints(\n foo_hint='set_on_subtransform', use_max_value_hint='10')\n\n @beam.ptransform_fn\n def CompositeTransform(pcoll):\n return pcoll | beam.Map(lambda x: x * 2) | SubTransform()\n p = beam.Pipeline()\n _ = p | beam.Create([1, 2]) | CompositeTransform().with_resource_hints(\n foo_hint='should_be_overriden_by_subtransform', bar_hint=\n 'set_on_composite', baz_hint='set_on_composite',\n use_max_value_hint='100')\n options = PortableOptions([\n '--resource_hint=baz_hint=should_be_overriden_by_composite',\n '--resource_hint=qux_hint=set_via_options',\n '--environment_type=PROCESS',\n '--environment_option=process_command=foo',\n '--sdk_location=container'])\n environment = 
ProcessEnvironment.from_options(options)\n proto = Pipeline.to_runner_api(p, default_environment=environment)\n for t in proto.components.transforms.values():\n if 'CompositeTransform/SubTransform/Map' in t.unique_name:\n environment = proto.components.environments.get(t.\n environment_id)\n self.assertEqual(environment.resource_hints.get('foo_urn'),\n b'set_on_subtransform')\n self.assertEqual(environment.resource_hints.get('bar_urn'),\n b'set_on_composite')\n self.assertEqual(environment.resource_hints.get('baz_urn'),\n b'set_on_composite')\n self.assertEqual(environment.resource_hints.get('qux_urn'),\n b'set_via_options')\n self.assertEqual(environment.resource_hints.get(\n 'use_max_value_urn'), b'100')\n found = True\n assert found\n\n def test_environments_with_same_resource_hints_are_reused(self):\n\n\n class HintX(ResourceHint):\n urn = 'X_urn'\n\n\n class HintY(ResourceHint):\n urn = 'Y_urn'\n\n\n class HintIsOdd(ResourceHint):\n urn = 'IsOdd_urn'\n ResourceHint.register_resource_hint('X', HintX)\n ResourceHint.register_resource_hint('Y', HintY)\n ResourceHint.register_resource_hint('IsOdd', HintIsOdd)\n p = beam.Pipeline()\n num_iter = 4\n for i in range(num_iter):\n _ = p | f'NoHintCreate_{i}' >> beam.Create([1, 2]\n ) | f'NoHint_{i}' >> beam.Map(lambda x: x + 1)\n _ = p | f'XCreate_{i}' >> beam.Create([1, 2]\n ) | f'HintX_{i}' >> beam.Map(lambda x: x + 1\n ).with_resource_hints(X='X')\n _ = p | f'XYCreate_{i}' >> beam.Create([1, 2]\n ) | f'HintXY_{i}' >> beam.Map(lambda x: x + 1\n ).with_resource_hints(X='X', Y='Y')\n _ = p | f'IsOddCreate_{i}' >> beam.Create([1, 2]\n ) | f'IsOdd_{i}' >> beam.Map(lambda x: x + 1\n ).with_resource_hints(IsOdd=str(i % 2 != 0))\n proto = Pipeline.to_runner_api(p)\n count_x = count_xy = count_is_odd = count_no_hints = 0\n env_ids = set()\n for _, t in proto.components.transforms.items():\n env = proto.components.environments[t.environment_id]\n if t.unique_name.startswith('HintX_'):\n count_x += 1\n 
env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {'X_urn': b'X'})\n if t.unique_name.startswith('HintXY_'):\n count_xy += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {'X_urn': b'X',\n 'Y_urn': b'Y'})\n if t.unique_name.startswith('NoHint_'):\n count_no_hints += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {})\n if t.unique_name.startswith('IsOdd_'):\n count_is_odd += 1\n env_ids.add(t.environment_id)\n self.assertTrue(env.resource_hints == {'IsOdd_urn': b'True'\n } or env.resource_hints == {'IsOdd_urn': b'False'})\n assert count_x == count_is_odd == count_xy == count_no_hints == num_iter\n assert num_iter > 1\n self.assertEqual(len(env_ids), 5)\n\n def test_multiple_application_of_the_same_transform_set_different_hints(\n self):\n\n\n class FooHint(ResourceHint):\n urn = 'foo_urn'\n\n\n class UseMaxValueHint(ResourceHint):\n urn = 'use_max_value_urn'\n\n @classmethod\n def get_merged_value(cls, outer_value, inner_value):\n return ResourceHint._use_max(outer_value, inner_value)\n ResourceHint.register_resource_hint('foo_hint', FooHint)\n ResourceHint.register_resource_hint('use_max_value_hint',\n UseMaxValueHint)\n\n @beam.ptransform_fn\n def SubTransform(pcoll):\n return pcoll | beam.Map(lambda x: x + 1)\n\n @beam.ptransform_fn\n def CompositeTransform(pcoll):\n sub = SubTransform()\n return pcoll | 'first' >> sub.with_resource_hints(foo_hint=\n 'first_application') | 'second' >> sub.with_resource_hints(\n foo_hint='second_application')\n p = beam.Pipeline()\n _ = p | beam.Create([1, 2]) | CompositeTransform()\n proto = Pipeline.to_runner_api(p)\n count = 0\n for t in proto.components.transforms.values():\n if 'CompositeTransform/first/Map' in t.unique_name:\n environment = proto.components.environments.get(t.\n environment_id)\n self.assertEqual(b'first_application', environment.\n resource_hints.get('foo_urn'))\n count += 1\n if 'CompositeTransform/second/Map' in t.unique_name:\n 
environment = proto.components.environments.get(t.\n environment_id)\n self.assertEqual(b'second_application', environment.\n resource_hints.get('foo_urn'))\n count += 1\n assert count == 2\n\n def test_environments_are_deduplicated(self):\n\n def file_artifact(path, hash, staged_name):\n return beam_runner_api_pb2.ArtifactInformation(type_urn=\n common_urns.artifact_types.FILE.urn, type_payload=\n beam_runner_api_pb2.ArtifactFilePayload(path=path, sha256=\n hash).SerializeToString(), role_urn=common_urns.\n artifact_roles.STAGING_TO.urn, role_payload=\n beam_runner_api_pb2.ArtifactStagingToRolePayload(\n staged_name=staged_name).SerializeToString())\n proto = beam_runner_api_pb2.Pipeline(components=beam_runner_api_pb2\n .Components(transforms={f'transform{ix}': beam_runner_api_pb2.\n PTransform(environment_id=f'e{ix}') for ix in range(8)},\n environments={'e1': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest')]), 'e2':\n beam_runner_api_pb2.Environment(dependencies=[file_artifact(\n 'a2', 'x', 'dest')]), 'e3': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a3', 'y', 'dest')]), 'e4':\n beam_runner_api_pb2.Environment(dependencies=[file_artifact(\n 'a4', 'y', 'dest2')]), 'e5': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest'), file_artifact(\n 'b1', 'xb', 'destB')]), 'e6': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a2', 'x', 'dest'), file_artifact(\n 'b2', 'xb', 'destB')]), 'e7': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest'), file_artifact(\n 'b2', 'y', 'destB')]), 'e0': beam_runner_api_pb2.Environment(\n resource_hints={'hint': b'value'}, dependencies=[file_artifact(\n 'a1', 'x', 'dest')])}))\n Pipeline.merge_compatible_environments(proto)\n self.assertEqual(proto.components.transforms['transform1'].\n environment_id, proto.components.transforms['transform2'].\n environment_id)\n 
self.assertEqual(proto.components.transforms['transform5'].\n environment_id, proto.components.transforms['transform6'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform1'].\n environment_id, proto.components.transforms['transform3'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform4'].\n environment_id, proto.components.transforms['transform3'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform6'].\n environment_id, proto.components.transforms['transform7'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform1'].\n environment_id, proto.components.transforms['transform0'].\n environment_id)\n self.assertEqual(len(proto.components.environments), 6)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PipelineTest(unittest.TestCase):\n\n @staticmethod\n def custom_callable(pcoll):\n return pcoll | '+1' >> FlatMap(lambda x: [x + 1])\n\n\n class CustomTransform(PTransform):\n\n def expand(self, pcoll):\n return pcoll | '+1' >> FlatMap(lambda x: [x + 1])\n\n\n class Visitor(PipelineVisitor):\n\n def __init__(self, visited):\n self.visited = visited\n self.enter_composite = []\n self.leave_composite = []\n\n def visit_value(self, value, _):\n self.visited.append(value)\n\n def enter_composite_transform(self, transform_node):\n self.enter_composite.append(transform_node)\n\n def leave_composite_transform(self, transform_node):\n self.leave_composite.append(transform_node)\n\n def test_create(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'label1' >> Create([1, 2, 3])\n assert_that(pcoll, equal_to([1, 2, 3]))\n pcoll2 = pipeline | 'label2' >> Create(iter((4, 5, 6)))\n pcoll3 = pcoll2 | 'do' >> FlatMap(lambda x: [x + 10])\n assert_that(pcoll3, equal_to([14, 15, 16]), label='pcoll3')\n <mask token>\n\n def test_maptuple_builtin(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | Create([('e1', 'e2')])\n side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create([\n 's1']))\n side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create([\n 's2']))\n fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (e1,\n e2, t, s1, s2)\n assert_that(pcoll | 'NoSides' >> beam.core.MapTuple(fn),\n equal_to([('e1', 'e2', MIN_TIMESTAMP, None, None)]), label=\n 'NoSidesCheck')\n assert_that(pcoll | 'StaticSides' >> beam.core.MapTuple(fn,\n 's1', 's2'), equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1',\n 's2')]), label='StaticSidesCheck')\n assert_that(pcoll | 'DynamicSides' >> beam.core.MapTuple(fn,\n side1, side2), equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1',\n 's2')]), label='DynamicSidesCheck')\n assert_that(pcoll | 'MixedSides' >> beam.core.MapTuple(fn, s2=\n side2), equal_to([('e1', 'e2', MIN_TIMESTAMP, None, 's2')]),\n 
label='MixedSidesCheck')\n <mask token>\n\n def test_create_singleton_pcollection(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'label' >> Create([[1, 2, 3]])\n assert_that(pcoll, equal_to([[1, 2, 3]]))\n\n def test_visit_entire_graph(self):\n pipeline = Pipeline()\n pcoll1 = pipeline | 'pcoll' >> beam.Impulse()\n pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])\n pcoll4 = pcoll2 | 'do3' >> FlatMap(lambda x: [x + 1])\n transform = PipelineTest.CustomTransform()\n pcoll5 = pcoll4 | transform\n visitor = PipelineTest.Visitor(visited=[])\n pipeline.visit(visitor)\n self.assertEqual({pcoll1, pcoll2, pcoll3, pcoll4, pcoll5}, set(\n visitor.visited))\n self.assertEqual(set(visitor.enter_composite), set(visitor.\n leave_composite))\n self.assertEqual(2, len(visitor.enter_composite))\n self.assertEqual(visitor.enter_composite[1].transform, transform)\n self.assertEqual(visitor.leave_composite[0].transform, transform)\n\n def test_apply_custom_transform(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'pcoll' >> Create([1, 2, 3])\n result = pcoll | PipelineTest.CustomTransform()\n assert_that(result, equal_to([2, 3, 4]))\n\n def test_reuse_custom_transform_instance(self):\n pipeline = Pipeline()\n pcoll1 = pipeline | 'pcoll1' >> Create([1, 2, 3])\n pcoll2 = pipeline | 'pcoll2' >> Create([4, 5, 6])\n transform = PipelineTest.CustomTransform()\n pcoll1 | transform\n with self.assertRaises(RuntimeError) as cm:\n pipeline.apply(transform, pcoll2)\n self.assertEqual(cm.exception.args[0],\n 'A transform with label \"CustomTransform\" already exists in the pipeline. 
To apply a transform with a specified label write pvalue | \"label\" >> transform'\n )\n\n def test_reuse_cloned_custom_transform_instance(self):\n with TestPipeline() as pipeline:\n pcoll1 = pipeline | 'pc1' >> Create([1, 2, 3])\n pcoll2 = pipeline | 'pc2' >> Create([4, 5, 6])\n transform = PipelineTest.CustomTransform()\n result1 = pcoll1 | transform\n result2 = pcoll2 | 'new_label' >> transform\n assert_that(result1, equal_to([2, 3, 4]), label='r1')\n assert_that(result2, equal_to([5, 6, 7]), label='r2')\n\n def test_transform_no_super_init(self):\n\n\n class AddSuffix(PTransform):\n\n def __init__(self, suffix):\n self.suffix = suffix\n\n def expand(self, pcoll):\n return pcoll | Map(lambda x: x + self.suffix)\n self.assertEqual(['a-x', 'b-x', 'c-x'], sorted(['a', 'b', 'c'] | \n 'AddSuffix' >> AddSuffix('-x')))\n\n @unittest.skip('Fails on some platforms with new urllib3.')\n def test_memory_usage(self):\n try:\n import resource\n except ImportError:\n self.skipTest('resource module not available.')\n if platform.mac_ver()[0]:\n self.skipTest('ru_maxrss is not in standard units.')\n\n def get_memory_usage_in_bytes():\n return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 2 ** 10\n\n def check_memory(value, memory_threshold):\n memory_usage = get_memory_usage_in_bytes()\n if memory_usage > memory_threshold:\n raise RuntimeError('High memory usage: %d > %d' % (\n memory_usage, memory_threshold))\n return value\n len_elements = 1000000\n num_elements = 10\n num_maps = 100\n with TestPipeline(runner='BundleBasedDirectRunner') as pipeline:\n memory_threshold = get_memory_usage_in_bytes(\n ) + 5 * len_elements * num_elements\n memory_threshold += 10 * 2 ** 20\n biglist = pipeline | 'oom:create' >> Create(['x' * len_elements\n ] * num_elements)\n for i in range(num_maps):\n biglist = biglist | 'oom:addone-%d' % i >> Map(lambda x: x +\n 'y')\n result = biglist | 'oom:check' >> Map(check_memory,\n memory_threshold)\n assert_that(result, equal_to(['x' * 
len_elements + 'y' *\n num_maps] * num_elements))\n\n def test_aggregator_empty_input(self):\n actual = [] | CombineGlobally(max).without_defaults()\n self.assertEqual(actual, [])\n\n def test_pipeline_as_context(self):\n\n def raise_exception(exn):\n raise exn\n with self.assertRaises(ValueError):\n with Pipeline() as p:\n p | Create([ValueError('msg')]) | Map(raise_exception)\n\n def test_ptransform_overrides(self):\n\n\n class MyParDoOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, DoubleParDo)\n\n def get_replacement_transform_for_applied_ptransform(self,\n applied_ptransform):\n ptransform = applied_ptransform.transform\n if isinstance(ptransform, DoubleParDo):\n return TripleParDo()\n raise ValueError('Unsupported type of transform: %r' %\n ptransform)\n p = Pipeline()\n pcoll = p | beam.Create([1, 2, 3]) | 'Multiply' >> DoubleParDo()\n assert_that(pcoll, equal_to([3, 6, 9]))\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_type_hints(self):\n\n\n class NoTypeHintOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, DoubleParDo)\n\n def get_replacement_transform_for_applied_ptransform(self,\n applied_ptransform):\n return ToStringParDo()\n\n\n class WithTypeHintOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, DoubleParDo)\n\n def get_replacement_transform_for_applied_ptransform(self,\n applied_ptransform):\n return ToStringParDo().with_input_types(int).with_output_types(\n str)\n for override, expected_type in [(NoTypeHintOverride(), int), (\n WithTypeHintOverride(), str)]:\n p = TestPipeline()\n pcoll = p | beam.Create([1, 2, 3]) | 'Operate' >> DoubleParDo(\n ) | 'NoOp' >> beam.Map(lambda x: x)\n p.replace_all([override])\n self.assertEqual(pcoll.producer.inputs[0].element_type,\n expected_type)\n\n def 
test_ptransform_override_multiple_inputs(self):\n\n\n class MyParDoOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform,\n FlattenAndDouble)\n\n def get_replacement_transform(self, applied_ptransform):\n return FlattenAndTriple()\n p = Pipeline()\n pcoll1 = p | 'pc1' >> beam.Create([1, 2, 3])\n pcoll2 = p | 'pc2' >> beam.Create([4, 5, 6])\n pcoll3 = (pcoll1, pcoll2) | 'FlattenAndMultiply' >> FlattenAndDouble()\n assert_that(pcoll3, equal_to([3, 6, 9, 12, 15, 18]))\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_side_inputs(self):\n\n\n class MyParDoOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, ParDo\n ) and isinstance(applied_ptransform.transform.fn,\n AddWithProductDoFn)\n\n def get_replacement_transform(self, transform):\n return AddThenMultiply()\n p = Pipeline()\n pcoll1 = p | 'pc1' >> beam.Create([2])\n pcoll2 = p | 'pc2' >> beam.Create([3])\n pcoll3 = p | 'pc3' >> beam.Create([4, 5, 6])\n result = pcoll3 | 'Operate' >> beam.ParDo(AddWithProductDoFn(),\n AsSingleton(pcoll1), AsSingleton(pcoll2))\n assert_that(result, equal_to([18, 21, 24]))\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_replacement_inputs(self):\n\n\n class MyParDoOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, ParDo\n ) and isinstance(applied_ptransform.transform.fn,\n AddWithProductDoFn)\n\n def get_replacement_transform(self, transform):\n return AddThenMultiply()\n\n def get_replacement_inputs(self, applied_ptransform):\n assert len(applied_ptransform.inputs) == 1\n assert len(applied_ptransform.side_inputs) == 2\n return applied_ptransform.inputs[0\n ], applied_ptransform.side_inputs[1\n ].pvalue, applied_ptransform.side_inputs[0].pvalue\n p = Pipeline()\n pcoll1 = p | 'pc1' >> beam.Create([2])\n pcoll2 = p 
| 'pc2' >> beam.Create([3])\n pcoll3 = p | 'pc3' >> beam.Create([4, 5, 6])\n result = pcoll3 | 'Operate' >> beam.ParDo(AddWithProductDoFn(),\n AsSingleton(pcoll1), AsSingleton(pcoll2))\n assert_that(result, equal_to([14, 16, 18]))\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_multiple_outputs(self):\n\n\n class MultiOutputComposite(PTransform):\n\n def __init__(self):\n self.output_tags = set()\n\n def expand(self, pcoll):\n\n def mux_input(x):\n x = x * 2\n if isinstance(x, int):\n yield TaggedOutput('numbers', x)\n else:\n yield TaggedOutput('letters', x)\n multi = pcoll | 'MyReplacement' >> beam.ParDo(mux_input\n ).with_outputs()\n letters = multi.letters | 'LettersComposite' >> beam.Map(lambda\n x: x * 3)\n numbers = multi.numbers | 'NumbersComposite' >> beam.Map(lambda\n x: x * 5)\n return {'letters': letters, 'numbers': numbers}\n\n\n class MultiOutputOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return applied_ptransform.full_label == 'MyMultiOutput'\n\n def get_replacement_transform_for_applied_ptransform(self,\n applied_ptransform):\n return MultiOutputComposite()\n\n def mux_input(x):\n if isinstance(x, int):\n yield TaggedOutput('numbers', x)\n else:\n yield TaggedOutput('letters', x)\n with TestPipeline() as p:\n multi = p | beam.Create([1, 2, 3, 'a', 'b', 'c']\n ) | 'MyMultiOutput' >> beam.ParDo(mux_input).with_outputs()\n letters = multi.letters | 'MyLetters' >> beam.Map(lambda x: x)\n numbers = multi.numbers | 'MyNumbers' >> beam.Map(lambda x: x)\n assert_that(letters, equal_to(['a' * 2 * 3, 'b' * 2 * 3, 'c' * \n 2 * 3]), label='assert letters')\n assert_that(numbers, equal_to([1 * 2 * 5, 2 * 2 * 5, 3 * 2 * 5]\n ), label='assert numbers')\n p.replace_all([MultiOutputOverride()])\n visitor = PipelineTest.Visitor(visited=[])\n p.visit(visitor)\n pcollections = visitor.visited\n composites = visitor.enter_composite\n self.assertIn(MultiOutputComposite, [t.transform.__class__ for t in\n 
composites])\n multi_output_composite = list(filter(lambda t: t.transform.\n __class__ == MultiOutputComposite, composites))[0]\n for output in multi_output_composite.outputs.values():\n self.assertIn(output, pcollections)\n self.assertNotIn(multi[None], visitor.visited)\n self.assertNotIn(multi.letters, visitor.visited)\n self.assertNotIn(multi.numbers, visitor.visited)\n\n def test_kv_ptransform_honor_type_hints(self):\n\n\n class StatefulDoFn(DoFn):\n BYTES_STATE = BagStateSpec('bytes', BytesCoder())\n\n def return_recursive(self, count):\n if count == 0:\n return ['some string']\n else:\n self.return_recursive(count - 1)\n\n def process(self, element, counter=DoFn.StateParam(BYTES_STATE)):\n return self.return_recursive(1)\n with TestPipeline() as p:\n pcoll = p | beam.Create([(1, 1), (2, 2), (3, 3)]\n ) | beam.GroupByKey() | beam.ParDo(StatefulDoFn())\n self.assertEqual(pcoll.element_type, typehints.Any)\n with TestPipeline() as p:\n pcoll = p | beam.Create([(1, 1), (2, 2), (3, 3)]\n ) | beam.GroupByKey() | beam.ParDo(StatefulDoFn()\n ).with_output_types(str)\n self.assertEqual(pcoll.element_type, str)\n\n def test_track_pcoll_unbounded(self):\n pipeline = TestPipeline()\n pcoll1 = pipeline | 'read' >> Read(FakeUnboundedSource())\n pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])\n self.assertIs(pcoll1.is_bounded, False)\n self.assertIs(pcoll2.is_bounded, False)\n self.assertIs(pcoll3.is_bounded, False)\n\n def test_track_pcoll_bounded(self):\n pipeline = TestPipeline()\n pcoll1 = pipeline | 'label1' >> Create([1, 2, 3])\n pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])\n self.assertIs(pcoll1.is_bounded, True)\n self.assertIs(pcoll2.is_bounded, True)\n self.assertIs(pcoll3.is_bounded, True)\n\n def test_track_pcoll_bounded_flatten(self):\n pipeline = TestPipeline()\n pcoll1_a = pipeline | 'label_a' >> Create([1, 2, 3])\n pcoll2_a = 
pcoll1_a | 'do_a' >> FlatMap(lambda x: [x + 1])\n pcoll1_b = pipeline | 'label_b' >> Create([1, 2, 3])\n pcoll2_b = pcoll1_b | 'do_b' >> FlatMap(lambda x: [x + 1])\n merged = (pcoll2_a, pcoll2_b) | beam.Flatten()\n self.assertIs(pcoll1_a.is_bounded, True)\n self.assertIs(pcoll2_a.is_bounded, True)\n self.assertIs(pcoll1_b.is_bounded, True)\n self.assertIs(pcoll2_b.is_bounded, True)\n self.assertIs(merged.is_bounded, True)\n\n def test_track_pcoll_unbounded_flatten(self):\n pipeline = TestPipeline()\n pcoll1_bounded = pipeline | 'label1' >> Create([1, 2, 3])\n pcoll2_bounded = pcoll1_bounded | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll1_unbounded = pipeline | 'read' >> Read(FakeUnboundedSource())\n pcoll2_unbounded = pcoll1_unbounded | 'do2' >> FlatMap(lambda x: [x +\n 1])\n merged = (pcoll2_bounded, pcoll2_unbounded) | beam.Flatten()\n self.assertIs(pcoll1_bounded.is_bounded, True)\n self.assertIs(pcoll2_bounded.is_bounded, True)\n self.assertIs(pcoll1_unbounded.is_bounded, False)\n self.assertIs(pcoll2_unbounded.is_bounded, False)\n self.assertIs(merged.is_bounded, False)\n\n def test_incompatible_submission_and_runtime_envs_fail_pipeline(self):\n with mock.patch(\n 'apache_beam.transforms.environments.sdk_base_version_capability'\n ) as base_version:\n base_version.side_effect = [\n f'beam:version:sdk_base:apache/beam_python3.5_sdk:2.{i}.0' for\n i in range(100)]\n with self.assertRaisesRegex(RuntimeError,\n 'Pipeline construction environment and pipeline runtime environment are not compatible.'\n ):\n with TestPipeline() as p:\n _ = p | Create([None])\n\n\nclass DoFnTest(unittest.TestCase):\n\n def test_element(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element):\n yield element + 10\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(\n TestDoFn())\n assert_that(pcoll, equal_to([11, 12]))\n\n def test_side_input_no_tag(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, prefix, 
suffix):\n return ['%s-%s-%s' % (prefix, element, suffix)]\n with TestPipeline() as pipeline:\n words_list = ['aa', 'bb', 'cc']\n words = pipeline | 'SomeWords' >> Create(words_list)\n prefix = 'zyx'\n suffix = pipeline | 'SomeString' >> Create(['xyz'])\n result = words | 'DecorateWordsDoFnNoTag' >> ParDo(TestDoFn(),\n prefix, suffix=AsSingleton(suffix))\n assert_that(result, equal_to([('zyx-%s-xyz' % x) for x in\n words_list]))\n\n def test_side_input_tagged(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, prefix, suffix=DoFn.SideInputParam):\n return ['%s-%s-%s' % (prefix, element, suffix)]\n with TestPipeline() as pipeline:\n words_list = ['aa', 'bb', 'cc']\n words = pipeline | 'SomeWords' >> Create(words_list)\n prefix = 'zyx'\n suffix = pipeline | 'SomeString' >> Create(['xyz'])\n result = words | 'DecorateWordsDoFnNoTag' >> ParDo(TestDoFn(),\n prefix, suffix=AsSingleton(suffix))\n assert_that(result, equal_to([('zyx-%s-xyz' % x) for x in\n words_list]))\n\n @pytest.mark.it_validatesrunner\n def test_element_param(self):\n pipeline = TestPipeline()\n input = [1, 2]\n pcoll = pipeline | 'Create' >> Create(input) | 'Ele param' >> Map(\n lambda element=DoFn.ElementParam: element)\n assert_that(pcoll, equal_to(input))\n pipeline.run()\n\n @pytest.mark.it_validatesrunner\n def test_key_param(self):\n pipeline = TestPipeline()\n pcoll = pipeline | 'Create' >> Create([('a', 1), ('b', 2)]\n ) | 'Key param' >> Map(lambda _, key=DoFn.KeyParam: key)\n assert_that(pcoll, equal_to(['a', 'b']))\n pipeline.run()\n\n def test_window_param(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, window=DoFn.WindowParam):\n yield element, (float(window.start), float(window.end))\n with TestPipeline() as pipeline:\n pcoll = pipeline | Create([1, 7]) | Map(lambda x:\n TimestampedValue(x, x)) | WindowInto(windowfn=\n SlidingWindows(10, 5)) | ParDo(TestDoFn())\n assert_that(pcoll, equal_to([(1, (-5, 5)), (1, (0, 10)), (7, (0,\n 10)), (7, (5, 15))]))\n 
pcoll2 = pcoll | 'Again' >> ParDo(TestDoFn())\n assert_that(pcoll2, equal_to([((1, (-5, 5)), (-5, 5)), ((1, (0,\n 10)), (0, 10)), ((7, (0, 10)), (0, 10)), ((7, (5, 15)), (5,\n 15))]), label='doubled windows')\n\n def test_timestamp_param(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, timestamp=DoFn.TimestampParam):\n yield timestamp\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(\n TestDoFn())\n assert_that(pcoll, equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))\n\n def test_timestamp_param_map(self):\n with TestPipeline() as p:\n assert_that(p | Create([1, 2]) | beam.Map(lambda _, t=DoFn.\n TimestampParam: t), equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))\n\n def test_pane_info_param(self):\n with TestPipeline() as p:\n pc = p | Create([(None, None)])\n assert_that(pc | beam.Map(lambda _, p=DoFn.PaneInfoParam: p),\n equal_to([windowed_value.PANE_INFO_UNKNOWN]), label=\n 'CheckUngrouped')\n assert_that(pc | beam.GroupByKey() | beam.Map(lambda _, p=DoFn.\n PaneInfoParam: p), equal_to([windowed_value.PaneInfo(\n is_first=True, is_last=True, timing=windowed_value.\n PaneInfoTiming.ON_TIME, index=0, nonspeculative_index=0)]),\n label='CheckGrouped')\n\n def test_incomparable_default(self):\n\n\n class IncomparableType(object):\n\n def __eq__(self, other):\n raise RuntimeError()\n\n def __ne__(self, other):\n raise RuntimeError()\n\n def __hash__(self):\n raise RuntimeError()\n with TestPipeline() as pipeline:\n pcoll = pipeline | beam.Create([None]) | Map(lambda e, x=\n IncomparableType(): (e, type(x).__name__))\n assert_that(pcoll, equal_to([(None, 'IncomparableType')]))\n\n\nclass Bacon(PipelineOptions):\n\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_argument('--slices', type=int)\n\n\nclass Eggs(PipelineOptions):\n\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_argument('--style', default='scrambled')\n\n\nclass Breakfast(Bacon, Eggs):\n pass\n\n\nclass 
PipelineOptionsTest(unittest.TestCase):\n\n def test_flag_parsing(self):\n options = Breakfast(['--slices=3', '--style=sunny side up',\n '--ignored'])\n self.assertEqual(3, options.slices)\n self.assertEqual('sunny side up', options.style)\n\n def test_keyword_parsing(self):\n options = Breakfast(['--slices=3', '--style=sunny side up',\n '--ignored'], slices=10)\n self.assertEqual(10, options.slices)\n self.assertEqual('sunny side up', options.style)\n\n def test_attribute_setting(self):\n options = Breakfast(slices=10)\n self.assertEqual(10, options.slices)\n options.slices = 20\n self.assertEqual(20, options.slices)\n\n def test_view_as(self):\n generic_options = PipelineOptions(['--slices=3'])\n self.assertEqual(3, generic_options.view_as(Bacon).slices)\n self.assertEqual(3, generic_options.view_as(Breakfast).slices)\n generic_options.view_as(Breakfast).slices = 10\n self.assertEqual(10, generic_options.view_as(Bacon).slices)\n with self.assertRaises(AttributeError):\n generic_options.slices\n with self.assertRaises(AttributeError):\n generic_options.view_as(Eggs).slices\n\n def test_defaults(self):\n options = Breakfast(['--slices=3'])\n self.assertEqual(3, options.slices)\n self.assertEqual('scrambled', options.style)\n\n def test_dir(self):\n options = Breakfast()\n self.assertEqual({'from_dictionary', 'get_all_options', 'slices',\n 'style', 'view_as', 'display_data'}, {attr for attr in dir(\n options) if not attr.startswith('_') and attr != 'next'})\n self.assertEqual({'from_dictionary', 'get_all_options', 'style',\n 'view_as', 'display_data'}, {attr for attr in dir(options.\n view_as(Eggs)) if not attr.startswith('_') and attr != 'next'})\n\n\nclass RunnerApiTest(unittest.TestCase):\n\n def test_parent_pointer(self):\n\n\n class MyPTransform(beam.PTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n p = beam.Pipeline()\n p | MyPTransform()\n p = Pipeline.from_runner_api(Pipeline.to_runner_api(p,\n use_fake_coders=True), None, 
None)\n self.assertIsNotNone(p.transforms_stack[0].parts[0].parent)\n self.assertEqual(p.transforms_stack[0].parts[0].parent, p.\n transforms_stack[0])\n\n def test_requirements(self):\n p = beam.Pipeline()\n _ = p | beam.Create([]) | beam.ParDo(lambda x, finalize=beam.DoFn.\n BundleFinalizerParam: None)\n proto = p.to_runner_api()\n self.assertTrue(common_urns.requirements.\n REQUIRES_BUNDLE_FINALIZATION.urn, proto.requirements)\n\n def test_annotations(self):\n some_proto = BytesCoder().to_runner_api(None)\n\n\n class EmptyTransform(beam.PTransform):\n\n def expand(self, pcoll):\n return pcoll\n\n def annotations(self):\n return {'foo': 'some_string'}\n\n\n class NonEmptyTransform(beam.PTransform):\n\n def expand(self, pcoll):\n return pcoll | beam.Map(lambda x: x)\n\n def annotations(self):\n return {'foo': b'some_bytes', 'proto': some_proto}\n p = beam.Pipeline()\n _ = p | beam.Create([]) | EmptyTransform() | NonEmptyTransform()\n proto = p.to_runner_api()\n seen = 0\n for transform in proto.components.transforms.values():\n if transform.unique_name == 'EmptyTransform':\n seen += 1\n self.assertEqual(transform.annotations['foo'], b'some_string')\n elif transform.unique_name == 'NonEmptyTransform':\n seen += 1\n self.assertEqual(transform.annotations['foo'], b'some_bytes')\n self.assertEqual(transform.annotations['proto'], some_proto\n .SerializeToString())\n self.assertEqual(seen, 2)\n\n def test_transform_ids(self):\n\n\n class MyPTransform(beam.PTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n p = beam.Pipeline()\n p | MyPTransform()\n runner_api_proto = Pipeline.to_runner_api(p)\n for transform_id in runner_api_proto.components.transforms:\n self.assertRegex(transform_id, '[a-zA-Z0-9-_]+')\n\n def test_input_names(self):\n\n\n class MyPTransform(beam.PTransform):\n\n def expand(self, pcolls):\n return pcolls.values() | beam.Flatten()\n p = beam.Pipeline()\n input_names = set('ABC')\n inputs = {x: (p | x >> 
beam.Create([x])) for x in input_names}\n inputs | MyPTransform()\n runner_api_proto = Pipeline.to_runner_api(p)\n for transform_proto in runner_api_proto.components.transforms.values():\n if transform_proto.unique_name == 'MyPTransform':\n self.assertEqual(set(transform_proto.inputs.keys()),\n input_names)\n break\n else:\n self.fail('Unable to find transform.')\n\n def test_display_data(self):\n\n\n class MyParentTransform(beam.PTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n\n def display_data(self):\n parent_dd = super().display_data()\n parent_dd['p_dd_string'] = DisplayDataItem('p_dd_string_value',\n label='p_dd_string_label')\n parent_dd['p_dd_string_2'] = DisplayDataItem(\n 'p_dd_string_value_2')\n parent_dd['p_dd_bool'] = DisplayDataItem(True, label=\n 'p_dd_bool_label')\n parent_dd['p_dd_int'] = DisplayDataItem(1, label=\n 'p_dd_int_label')\n return parent_dd\n\n\n class MyPTransform(MyParentTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n\n def display_data(self):\n parent_dd = super().display_data()\n parent_dd['dd_string'] = DisplayDataItem('dd_string_value',\n label='dd_string_label')\n parent_dd['dd_string_2'] = DisplayDataItem('dd_string_value_2')\n parent_dd['dd_bool'] = DisplayDataItem(False, label=\n 'dd_bool_label')\n parent_dd['dd_double'] = DisplayDataItem(1.1, label=\n 'dd_double_label')\n return parent_dd\n p = beam.Pipeline()\n p | MyPTransform()\n proto_pipeline = Pipeline.to_runner_api(p, use_fake_coders=True)\n my_transform, = [transform for transform in proto_pipeline.\n components.transforms.values() if transform.unique_name ==\n 'MyPTransform']\n self.assertIsNotNone(my_transform)\n self.assertListEqual(list(my_transform.display_data), [\n beam_runner_api_pb2.DisplayData(urn=common_urns.\n StandardDisplayData.DisplayData.LABELLED.urn, payload=\n beam_runner_api_pb2.LabelledPayload(label='p_dd_string_label',\n key='p_dd_string', namespace=\n 
'apache_beam.pipeline_test.MyPTransform', string_value=\n 'p_dd_string_value').SerializeToString()), beam_runner_api_pb2.\n DisplayData(urn=common_urns.StandardDisplayData.DisplayData.\n LABELLED.urn, payload=beam_runner_api_pb2.LabelledPayload(label\n ='p_dd_string_2', key='p_dd_string_2', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 'p_dd_string_value_2').SerializeToString()),\n beam_runner_api_pb2.DisplayData(urn=common_urns.\n StandardDisplayData.DisplayData.LABELLED.urn, payload=\n beam_runner_api_pb2.LabelledPayload(label='p_dd_bool_label',\n key='p_dd_bool', namespace=\n 'apache_beam.pipeline_test.MyPTransform', bool_value=True).\n SerializeToString()), beam_runner_api_pb2.DisplayData(urn=\n common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(label=\n 'p_dd_int_label', key='p_dd_int', namespace=\n 'apache_beam.pipeline_test.MyPTransform', int_value=1).\n SerializeToString()), beam_runner_api_pb2.DisplayData(urn=\n common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(label=\n 'dd_string_label', key='dd_string', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 'dd_string_value').SerializeToString()), beam_runner_api_pb2.\n DisplayData(urn=common_urns.StandardDisplayData.DisplayData.\n LABELLED.urn, payload=beam_runner_api_pb2.LabelledPayload(label\n ='dd_string_2', key='dd_string_2', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 'dd_string_value_2').SerializeToString()), beam_runner_api_pb2.\n DisplayData(urn=common_urns.StandardDisplayData.DisplayData.\n LABELLED.urn, payload=beam_runner_api_pb2.LabelledPayload(label\n ='dd_bool_label', key='dd_bool', namespace=\n 'apache_beam.pipeline_test.MyPTransform', bool_value=False).\n SerializeToString()), beam_runner_api_pb2.DisplayData(urn=\n common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n 
payload=beam_runner_api_pb2.LabelledPayload(label=\n 'dd_double_label', key='dd_double', namespace=\n 'apache_beam.pipeline_test.MyPTransform', double_value=1.1).\n SerializeToString())])\n\n def test_runner_api_roundtrip_preserves_resource_hints(self):\n p = beam.Pipeline()\n _ = p | beam.Create([1, 2]) | beam.Map(lambda x: x + 1\n ).with_resource_hints(accelerator='gpu')\n self.assertEqual(p.transforms_stack[0].parts[1].transform.\n get_resource_hints(), {common_urns.resource_hints.ACCELERATOR.\n urn: b'gpu'})\n for _ in range(3):\n p = Pipeline.from_runner_api(Pipeline.to_runner_api(p), None, None)\n self.assertEqual(p.transforms_stack[0].parts[1].transform.\n get_resource_hints(), {common_urns.resource_hints.\n ACCELERATOR.urn: b'gpu'})\n\n def test_hints_on_composite_transforms_are_propagated_to_subtransforms(self\n ):\n\n\n class FooHint(ResourceHint):\n urn = 'foo_urn'\n\n\n class BarHint(ResourceHint):\n urn = 'bar_urn'\n\n\n class BazHint(ResourceHint):\n urn = 'baz_urn'\n\n\n class QuxHint(ResourceHint):\n urn = 'qux_urn'\n\n\n class UseMaxValueHint(ResourceHint):\n urn = 'use_max_value_urn'\n\n @classmethod\n def get_merged_value(cls, outer_value, inner_value):\n return ResourceHint._use_max(outer_value, inner_value)\n ResourceHint.register_resource_hint('foo_hint', FooHint)\n ResourceHint.register_resource_hint('bar_hint', BarHint)\n ResourceHint.register_resource_hint('baz_hint', BazHint)\n ResourceHint.register_resource_hint('qux_hint', QuxHint)\n ResourceHint.register_resource_hint('use_max_value_hint',\n UseMaxValueHint)\n\n @beam.ptransform_fn\n def SubTransform(pcoll):\n return pcoll | beam.Map(lambda x: x + 1).with_resource_hints(\n foo_hint='set_on_subtransform', use_max_value_hint='10')\n\n @beam.ptransform_fn\n def CompositeTransform(pcoll):\n return pcoll | beam.Map(lambda x: x * 2) | SubTransform()\n p = beam.Pipeline()\n _ = p | beam.Create([1, 2]) | CompositeTransform().with_resource_hints(\n foo_hint='should_be_overriden_by_subtransform', 
bar_hint=\n 'set_on_composite', baz_hint='set_on_composite',\n use_max_value_hint='100')\n options = PortableOptions([\n '--resource_hint=baz_hint=should_be_overriden_by_composite',\n '--resource_hint=qux_hint=set_via_options',\n '--environment_type=PROCESS',\n '--environment_option=process_command=foo',\n '--sdk_location=container'])\n environment = ProcessEnvironment.from_options(options)\n proto = Pipeline.to_runner_api(p, default_environment=environment)\n for t in proto.components.transforms.values():\n if 'CompositeTransform/SubTransform/Map' in t.unique_name:\n environment = proto.components.environments.get(t.\n environment_id)\n self.assertEqual(environment.resource_hints.get('foo_urn'),\n b'set_on_subtransform')\n self.assertEqual(environment.resource_hints.get('bar_urn'),\n b'set_on_composite')\n self.assertEqual(environment.resource_hints.get('baz_urn'),\n b'set_on_composite')\n self.assertEqual(environment.resource_hints.get('qux_urn'),\n b'set_via_options')\n self.assertEqual(environment.resource_hints.get(\n 'use_max_value_urn'), b'100')\n found = True\n assert found\n\n def test_environments_with_same_resource_hints_are_reused(self):\n\n\n class HintX(ResourceHint):\n urn = 'X_urn'\n\n\n class HintY(ResourceHint):\n urn = 'Y_urn'\n\n\n class HintIsOdd(ResourceHint):\n urn = 'IsOdd_urn'\n ResourceHint.register_resource_hint('X', HintX)\n ResourceHint.register_resource_hint('Y', HintY)\n ResourceHint.register_resource_hint('IsOdd', HintIsOdd)\n p = beam.Pipeline()\n num_iter = 4\n for i in range(num_iter):\n _ = p | f'NoHintCreate_{i}' >> beam.Create([1, 2]\n ) | f'NoHint_{i}' >> beam.Map(lambda x: x + 1)\n _ = p | f'XCreate_{i}' >> beam.Create([1, 2]\n ) | f'HintX_{i}' >> beam.Map(lambda x: x + 1\n ).with_resource_hints(X='X')\n _ = p | f'XYCreate_{i}' >> beam.Create([1, 2]\n ) | f'HintXY_{i}' >> beam.Map(lambda x: x + 1\n ).with_resource_hints(X='X', Y='Y')\n _ = p | f'IsOddCreate_{i}' >> beam.Create([1, 2]\n ) | f'IsOdd_{i}' >> beam.Map(lambda x: x 
+ 1\n ).with_resource_hints(IsOdd=str(i % 2 != 0))\n proto = Pipeline.to_runner_api(p)\n count_x = count_xy = count_is_odd = count_no_hints = 0\n env_ids = set()\n for _, t in proto.components.transforms.items():\n env = proto.components.environments[t.environment_id]\n if t.unique_name.startswith('HintX_'):\n count_x += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {'X_urn': b'X'})\n if t.unique_name.startswith('HintXY_'):\n count_xy += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {'X_urn': b'X',\n 'Y_urn': b'Y'})\n if t.unique_name.startswith('NoHint_'):\n count_no_hints += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {})\n if t.unique_name.startswith('IsOdd_'):\n count_is_odd += 1\n env_ids.add(t.environment_id)\n self.assertTrue(env.resource_hints == {'IsOdd_urn': b'True'\n } or env.resource_hints == {'IsOdd_urn': b'False'})\n assert count_x == count_is_odd == count_xy == count_no_hints == num_iter\n assert num_iter > 1\n self.assertEqual(len(env_ids), 5)\n\n def test_multiple_application_of_the_same_transform_set_different_hints(\n self):\n\n\n class FooHint(ResourceHint):\n urn = 'foo_urn'\n\n\n class UseMaxValueHint(ResourceHint):\n urn = 'use_max_value_urn'\n\n @classmethod\n def get_merged_value(cls, outer_value, inner_value):\n return ResourceHint._use_max(outer_value, inner_value)\n ResourceHint.register_resource_hint('foo_hint', FooHint)\n ResourceHint.register_resource_hint('use_max_value_hint',\n UseMaxValueHint)\n\n @beam.ptransform_fn\n def SubTransform(pcoll):\n return pcoll | beam.Map(lambda x: x + 1)\n\n @beam.ptransform_fn\n def CompositeTransform(pcoll):\n sub = SubTransform()\n return pcoll | 'first' >> sub.with_resource_hints(foo_hint=\n 'first_application') | 'second' >> sub.with_resource_hints(\n foo_hint='second_application')\n p = beam.Pipeline()\n _ = p | beam.Create([1, 2]) | CompositeTransform()\n proto = Pipeline.to_runner_api(p)\n count = 0\n for t in 
proto.components.transforms.values():\n if 'CompositeTransform/first/Map' in t.unique_name:\n environment = proto.components.environments.get(t.\n environment_id)\n self.assertEqual(b'first_application', environment.\n resource_hints.get('foo_urn'))\n count += 1\n if 'CompositeTransform/second/Map' in t.unique_name:\n environment = proto.components.environments.get(t.\n environment_id)\n self.assertEqual(b'second_application', environment.\n resource_hints.get('foo_urn'))\n count += 1\n assert count == 2\n\n def test_environments_are_deduplicated(self):\n\n def file_artifact(path, hash, staged_name):\n return beam_runner_api_pb2.ArtifactInformation(type_urn=\n common_urns.artifact_types.FILE.urn, type_payload=\n beam_runner_api_pb2.ArtifactFilePayload(path=path, sha256=\n hash).SerializeToString(), role_urn=common_urns.\n artifact_roles.STAGING_TO.urn, role_payload=\n beam_runner_api_pb2.ArtifactStagingToRolePayload(\n staged_name=staged_name).SerializeToString())\n proto = beam_runner_api_pb2.Pipeline(components=beam_runner_api_pb2\n .Components(transforms={f'transform{ix}': beam_runner_api_pb2.\n PTransform(environment_id=f'e{ix}') for ix in range(8)},\n environments={'e1': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest')]), 'e2':\n beam_runner_api_pb2.Environment(dependencies=[file_artifact(\n 'a2', 'x', 'dest')]), 'e3': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a3', 'y', 'dest')]), 'e4':\n beam_runner_api_pb2.Environment(dependencies=[file_artifact(\n 'a4', 'y', 'dest2')]), 'e5': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest'), file_artifact(\n 'b1', 'xb', 'destB')]), 'e6': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a2', 'x', 'dest'), file_artifact(\n 'b2', 'xb', 'destB')]), 'e7': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest'), file_artifact(\n 'b2', 'y', 'destB')]), 'e0': beam_runner_api_pb2.Environment(\n 
resource_hints={'hint': b'value'}, dependencies=[file_artifact(\n 'a1', 'x', 'dest')])}))\n Pipeline.merge_compatible_environments(proto)\n self.assertEqual(proto.components.transforms['transform1'].\n environment_id, proto.components.transforms['transform2'].\n environment_id)\n self.assertEqual(proto.components.transforms['transform5'].\n environment_id, proto.components.transforms['transform6'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform1'].\n environment_id, proto.components.transforms['transform3'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform4'].\n environment_id, proto.components.transforms['transform3'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform6'].\n environment_id, proto.components.transforms['transform7'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform1'].\n environment_id, proto.components.transforms['transform0'].\n environment_id)\n self.assertEqual(len(proto.components.environments), 6)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AddThenMultiply(beam.PTransform):\n <mask token>\n\n\nclass PipelineTest(unittest.TestCase):\n\n @staticmethod\n def custom_callable(pcoll):\n return pcoll | '+1' >> FlatMap(lambda x: [x + 1])\n\n\n class CustomTransform(PTransform):\n\n def expand(self, pcoll):\n return pcoll | '+1' >> FlatMap(lambda x: [x + 1])\n\n\n class Visitor(PipelineVisitor):\n\n def __init__(self, visited):\n self.visited = visited\n self.enter_composite = []\n self.leave_composite = []\n\n def visit_value(self, value, _):\n self.visited.append(value)\n\n def enter_composite_transform(self, transform_node):\n self.enter_composite.append(transform_node)\n\n def leave_composite_transform(self, transform_node):\n self.leave_composite.append(transform_node)\n\n def test_create(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'label1' >> Create([1, 2, 3])\n assert_that(pcoll, equal_to([1, 2, 3]))\n pcoll2 = pipeline | 'label2' >> Create(iter((4, 5, 6)))\n pcoll3 = pcoll2 | 'do' >> FlatMap(lambda x: [x + 10])\n assert_that(pcoll3, equal_to([14, 15, 16]), label='pcoll3')\n\n def test_flatmap_builtin(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'label1' >> Create([1, 2, 3])\n assert_that(pcoll, equal_to([1, 2, 3]))\n pcoll2 = pcoll | 'do' >> FlatMap(lambda x: [x + 10])\n assert_that(pcoll2, equal_to([11, 12, 13]), label='pcoll2')\n pcoll3 = pcoll2 | 'm1' >> Map(lambda x: [x, 12])\n assert_that(pcoll3, equal_to([[11, 12], [12, 12], [13, 12]]),\n label='pcoll3')\n pcoll4 = pcoll3 | 'do2' >> FlatMap(set)\n assert_that(pcoll4, equal_to([11, 12, 12, 12, 13]), label='pcoll4')\n\n def test_maptuple_builtin(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | Create([('e1', 'e2')])\n side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create([\n 's1']))\n side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create([\n 's2']))\n fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (e1,\n e2, t, s1, s2)\n 
assert_that(pcoll | 'NoSides' >> beam.core.MapTuple(fn),\n equal_to([('e1', 'e2', MIN_TIMESTAMP, None, None)]), label=\n 'NoSidesCheck')\n assert_that(pcoll | 'StaticSides' >> beam.core.MapTuple(fn,\n 's1', 's2'), equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1',\n 's2')]), label='StaticSidesCheck')\n assert_that(pcoll | 'DynamicSides' >> beam.core.MapTuple(fn,\n side1, side2), equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1',\n 's2')]), label='DynamicSidesCheck')\n assert_that(pcoll | 'MixedSides' >> beam.core.MapTuple(fn, s2=\n side2), equal_to([('e1', 'e2', MIN_TIMESTAMP, None, 's2')]),\n label='MixedSidesCheck')\n\n def test_flatmaptuple_builtin(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | Create([('e1', 'e2')])\n side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create([\n 's1']))\n side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create([\n 's2']))\n fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (e1,\n e2, t, s1, s2)\n assert_that(pcoll | 'NoSides' >> beam.core.FlatMapTuple(fn),\n equal_to(['e1', 'e2', MIN_TIMESTAMP, None, None]), label=\n 'NoSidesCheck')\n assert_that(pcoll | 'StaticSides' >> beam.core.FlatMapTuple(fn,\n 's1', 's2'), equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1',\n 's2']), label='StaticSidesCheck')\n assert_that(pcoll | 'DynamicSides' >> beam.core.FlatMapTuple(fn,\n side1, side2), equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1',\n 's2']), label='DynamicSidesCheck')\n assert_that(pcoll | 'MixedSides' >> beam.core.FlatMapTuple(fn,\n s2=side2), equal_to(['e1', 'e2', MIN_TIMESTAMP, None, 's2']\n ), label='MixedSidesCheck')\n\n def test_create_singleton_pcollection(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'label' >> Create([[1, 2, 3]])\n assert_that(pcoll, equal_to([[1, 2, 3]]))\n\n def test_visit_entire_graph(self):\n pipeline = Pipeline()\n pcoll1 = pipeline | 'pcoll' >> beam.Impulse()\n pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])\n 
pcoll4 = pcoll2 | 'do3' >> FlatMap(lambda x: [x + 1])\n transform = PipelineTest.CustomTransform()\n pcoll5 = pcoll4 | transform\n visitor = PipelineTest.Visitor(visited=[])\n pipeline.visit(visitor)\n self.assertEqual({pcoll1, pcoll2, pcoll3, pcoll4, pcoll5}, set(\n visitor.visited))\n self.assertEqual(set(visitor.enter_composite), set(visitor.\n leave_composite))\n self.assertEqual(2, len(visitor.enter_composite))\n self.assertEqual(visitor.enter_composite[1].transform, transform)\n self.assertEqual(visitor.leave_composite[0].transform, transform)\n\n def test_apply_custom_transform(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'pcoll' >> Create([1, 2, 3])\n result = pcoll | PipelineTest.CustomTransform()\n assert_that(result, equal_to([2, 3, 4]))\n\n def test_reuse_custom_transform_instance(self):\n pipeline = Pipeline()\n pcoll1 = pipeline | 'pcoll1' >> Create([1, 2, 3])\n pcoll2 = pipeline | 'pcoll2' >> Create([4, 5, 6])\n transform = PipelineTest.CustomTransform()\n pcoll1 | transform\n with self.assertRaises(RuntimeError) as cm:\n pipeline.apply(transform, pcoll2)\n self.assertEqual(cm.exception.args[0],\n 'A transform with label \"CustomTransform\" already exists in the pipeline. 
To apply a transform with a specified label write pvalue | \"label\" >> transform'\n )\n\n def test_reuse_cloned_custom_transform_instance(self):\n with TestPipeline() as pipeline:\n pcoll1 = pipeline | 'pc1' >> Create([1, 2, 3])\n pcoll2 = pipeline | 'pc2' >> Create([4, 5, 6])\n transform = PipelineTest.CustomTransform()\n result1 = pcoll1 | transform\n result2 = pcoll2 | 'new_label' >> transform\n assert_that(result1, equal_to([2, 3, 4]), label='r1')\n assert_that(result2, equal_to([5, 6, 7]), label='r2')\n\n def test_transform_no_super_init(self):\n\n\n class AddSuffix(PTransform):\n\n def __init__(self, suffix):\n self.suffix = suffix\n\n def expand(self, pcoll):\n return pcoll | Map(lambda x: x + self.suffix)\n self.assertEqual(['a-x', 'b-x', 'c-x'], sorted(['a', 'b', 'c'] | \n 'AddSuffix' >> AddSuffix('-x')))\n\n @unittest.skip('Fails on some platforms with new urllib3.')\n def test_memory_usage(self):\n try:\n import resource\n except ImportError:\n self.skipTest('resource module not available.')\n if platform.mac_ver()[0]:\n self.skipTest('ru_maxrss is not in standard units.')\n\n def get_memory_usage_in_bytes():\n return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 2 ** 10\n\n def check_memory(value, memory_threshold):\n memory_usage = get_memory_usage_in_bytes()\n if memory_usage > memory_threshold:\n raise RuntimeError('High memory usage: %d > %d' % (\n memory_usage, memory_threshold))\n return value\n len_elements = 1000000\n num_elements = 10\n num_maps = 100\n with TestPipeline(runner='BundleBasedDirectRunner') as pipeline:\n memory_threshold = get_memory_usage_in_bytes(\n ) + 5 * len_elements * num_elements\n memory_threshold += 10 * 2 ** 20\n biglist = pipeline | 'oom:create' >> Create(['x' * len_elements\n ] * num_elements)\n for i in range(num_maps):\n biglist = biglist | 'oom:addone-%d' % i >> Map(lambda x: x +\n 'y')\n result = biglist | 'oom:check' >> Map(check_memory,\n memory_threshold)\n assert_that(result, equal_to(['x' * 
len_elements + 'y' *\n num_maps] * num_elements))\n\n def test_aggregator_empty_input(self):\n actual = [] | CombineGlobally(max).without_defaults()\n self.assertEqual(actual, [])\n\n def test_pipeline_as_context(self):\n\n def raise_exception(exn):\n raise exn\n with self.assertRaises(ValueError):\n with Pipeline() as p:\n p | Create([ValueError('msg')]) | Map(raise_exception)\n\n def test_ptransform_overrides(self):\n\n\n class MyParDoOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, DoubleParDo)\n\n def get_replacement_transform_for_applied_ptransform(self,\n applied_ptransform):\n ptransform = applied_ptransform.transform\n if isinstance(ptransform, DoubleParDo):\n return TripleParDo()\n raise ValueError('Unsupported type of transform: %r' %\n ptransform)\n p = Pipeline()\n pcoll = p | beam.Create([1, 2, 3]) | 'Multiply' >> DoubleParDo()\n assert_that(pcoll, equal_to([3, 6, 9]))\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_type_hints(self):\n\n\n class NoTypeHintOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, DoubleParDo)\n\n def get_replacement_transform_for_applied_ptransform(self,\n applied_ptransform):\n return ToStringParDo()\n\n\n class WithTypeHintOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, DoubleParDo)\n\n def get_replacement_transform_for_applied_ptransform(self,\n applied_ptransform):\n return ToStringParDo().with_input_types(int).with_output_types(\n str)\n for override, expected_type in [(NoTypeHintOverride(), int), (\n WithTypeHintOverride(), str)]:\n p = TestPipeline()\n pcoll = p | beam.Create([1, 2, 3]) | 'Operate' >> DoubleParDo(\n ) | 'NoOp' >> beam.Map(lambda x: x)\n p.replace_all([override])\n self.assertEqual(pcoll.producer.inputs[0].element_type,\n expected_type)\n\n def 
test_ptransform_override_multiple_inputs(self):\n\n\n class MyParDoOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform,\n FlattenAndDouble)\n\n def get_replacement_transform(self, applied_ptransform):\n return FlattenAndTriple()\n p = Pipeline()\n pcoll1 = p | 'pc1' >> beam.Create([1, 2, 3])\n pcoll2 = p | 'pc2' >> beam.Create([4, 5, 6])\n pcoll3 = (pcoll1, pcoll2) | 'FlattenAndMultiply' >> FlattenAndDouble()\n assert_that(pcoll3, equal_to([3, 6, 9, 12, 15, 18]))\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_side_inputs(self):\n\n\n class MyParDoOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, ParDo\n ) and isinstance(applied_ptransform.transform.fn,\n AddWithProductDoFn)\n\n def get_replacement_transform(self, transform):\n return AddThenMultiply()\n p = Pipeline()\n pcoll1 = p | 'pc1' >> beam.Create([2])\n pcoll2 = p | 'pc2' >> beam.Create([3])\n pcoll3 = p | 'pc3' >> beam.Create([4, 5, 6])\n result = pcoll3 | 'Operate' >> beam.ParDo(AddWithProductDoFn(),\n AsSingleton(pcoll1), AsSingleton(pcoll2))\n assert_that(result, equal_to([18, 21, 24]))\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_replacement_inputs(self):\n\n\n class MyParDoOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, ParDo\n ) and isinstance(applied_ptransform.transform.fn,\n AddWithProductDoFn)\n\n def get_replacement_transform(self, transform):\n return AddThenMultiply()\n\n def get_replacement_inputs(self, applied_ptransform):\n assert len(applied_ptransform.inputs) == 1\n assert len(applied_ptransform.side_inputs) == 2\n return applied_ptransform.inputs[0\n ], applied_ptransform.side_inputs[1\n ].pvalue, applied_ptransform.side_inputs[0].pvalue\n p = Pipeline()\n pcoll1 = p | 'pc1' >> beam.Create([2])\n pcoll2 = p 
| 'pc2' >> beam.Create([3])\n pcoll3 = p | 'pc3' >> beam.Create([4, 5, 6])\n result = pcoll3 | 'Operate' >> beam.ParDo(AddWithProductDoFn(),\n AsSingleton(pcoll1), AsSingleton(pcoll2))\n assert_that(result, equal_to([14, 16, 18]))\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_multiple_outputs(self):\n\n\n class MultiOutputComposite(PTransform):\n\n def __init__(self):\n self.output_tags = set()\n\n def expand(self, pcoll):\n\n def mux_input(x):\n x = x * 2\n if isinstance(x, int):\n yield TaggedOutput('numbers', x)\n else:\n yield TaggedOutput('letters', x)\n multi = pcoll | 'MyReplacement' >> beam.ParDo(mux_input\n ).with_outputs()\n letters = multi.letters | 'LettersComposite' >> beam.Map(lambda\n x: x * 3)\n numbers = multi.numbers | 'NumbersComposite' >> beam.Map(lambda\n x: x * 5)\n return {'letters': letters, 'numbers': numbers}\n\n\n class MultiOutputOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return applied_ptransform.full_label == 'MyMultiOutput'\n\n def get_replacement_transform_for_applied_ptransform(self,\n applied_ptransform):\n return MultiOutputComposite()\n\n def mux_input(x):\n if isinstance(x, int):\n yield TaggedOutput('numbers', x)\n else:\n yield TaggedOutput('letters', x)\n with TestPipeline() as p:\n multi = p | beam.Create([1, 2, 3, 'a', 'b', 'c']\n ) | 'MyMultiOutput' >> beam.ParDo(mux_input).with_outputs()\n letters = multi.letters | 'MyLetters' >> beam.Map(lambda x: x)\n numbers = multi.numbers | 'MyNumbers' >> beam.Map(lambda x: x)\n assert_that(letters, equal_to(['a' * 2 * 3, 'b' * 2 * 3, 'c' * \n 2 * 3]), label='assert letters')\n assert_that(numbers, equal_to([1 * 2 * 5, 2 * 2 * 5, 3 * 2 * 5]\n ), label='assert numbers')\n p.replace_all([MultiOutputOverride()])\n visitor = PipelineTest.Visitor(visited=[])\n p.visit(visitor)\n pcollections = visitor.visited\n composites = visitor.enter_composite\n self.assertIn(MultiOutputComposite, [t.transform.__class__ for t in\n 
composites])\n multi_output_composite = list(filter(lambda t: t.transform.\n __class__ == MultiOutputComposite, composites))[0]\n for output in multi_output_composite.outputs.values():\n self.assertIn(output, pcollections)\n self.assertNotIn(multi[None], visitor.visited)\n self.assertNotIn(multi.letters, visitor.visited)\n self.assertNotIn(multi.numbers, visitor.visited)\n\n def test_kv_ptransform_honor_type_hints(self):\n\n\n class StatefulDoFn(DoFn):\n BYTES_STATE = BagStateSpec('bytes', BytesCoder())\n\n def return_recursive(self, count):\n if count == 0:\n return ['some string']\n else:\n self.return_recursive(count - 1)\n\n def process(self, element, counter=DoFn.StateParam(BYTES_STATE)):\n return self.return_recursive(1)\n with TestPipeline() as p:\n pcoll = p | beam.Create([(1, 1), (2, 2), (3, 3)]\n ) | beam.GroupByKey() | beam.ParDo(StatefulDoFn())\n self.assertEqual(pcoll.element_type, typehints.Any)\n with TestPipeline() as p:\n pcoll = p | beam.Create([(1, 1), (2, 2), (3, 3)]\n ) | beam.GroupByKey() | beam.ParDo(StatefulDoFn()\n ).with_output_types(str)\n self.assertEqual(pcoll.element_type, str)\n\n def test_track_pcoll_unbounded(self):\n pipeline = TestPipeline()\n pcoll1 = pipeline | 'read' >> Read(FakeUnboundedSource())\n pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])\n self.assertIs(pcoll1.is_bounded, False)\n self.assertIs(pcoll2.is_bounded, False)\n self.assertIs(pcoll3.is_bounded, False)\n\n def test_track_pcoll_bounded(self):\n pipeline = TestPipeline()\n pcoll1 = pipeline | 'label1' >> Create([1, 2, 3])\n pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])\n self.assertIs(pcoll1.is_bounded, True)\n self.assertIs(pcoll2.is_bounded, True)\n self.assertIs(pcoll3.is_bounded, True)\n\n def test_track_pcoll_bounded_flatten(self):\n pipeline = TestPipeline()\n pcoll1_a = pipeline | 'label_a' >> Create([1, 2, 3])\n pcoll2_a = 
pcoll1_a | 'do_a' >> FlatMap(lambda x: [x + 1])\n pcoll1_b = pipeline | 'label_b' >> Create([1, 2, 3])\n pcoll2_b = pcoll1_b | 'do_b' >> FlatMap(lambda x: [x + 1])\n merged = (pcoll2_a, pcoll2_b) | beam.Flatten()\n self.assertIs(pcoll1_a.is_bounded, True)\n self.assertIs(pcoll2_a.is_bounded, True)\n self.assertIs(pcoll1_b.is_bounded, True)\n self.assertIs(pcoll2_b.is_bounded, True)\n self.assertIs(merged.is_bounded, True)\n\n def test_track_pcoll_unbounded_flatten(self):\n pipeline = TestPipeline()\n pcoll1_bounded = pipeline | 'label1' >> Create([1, 2, 3])\n pcoll2_bounded = pcoll1_bounded | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll1_unbounded = pipeline | 'read' >> Read(FakeUnboundedSource())\n pcoll2_unbounded = pcoll1_unbounded | 'do2' >> FlatMap(lambda x: [x +\n 1])\n merged = (pcoll2_bounded, pcoll2_unbounded) | beam.Flatten()\n self.assertIs(pcoll1_bounded.is_bounded, True)\n self.assertIs(pcoll2_bounded.is_bounded, True)\n self.assertIs(pcoll1_unbounded.is_bounded, False)\n self.assertIs(pcoll2_unbounded.is_bounded, False)\n self.assertIs(merged.is_bounded, False)\n\n def test_incompatible_submission_and_runtime_envs_fail_pipeline(self):\n with mock.patch(\n 'apache_beam.transforms.environments.sdk_base_version_capability'\n ) as base_version:\n base_version.side_effect = [\n f'beam:version:sdk_base:apache/beam_python3.5_sdk:2.{i}.0' for\n i in range(100)]\n with self.assertRaisesRegex(RuntimeError,\n 'Pipeline construction environment and pipeline runtime environment are not compatible.'\n ):\n with TestPipeline() as p:\n _ = p | Create([None])\n\n\nclass DoFnTest(unittest.TestCase):\n\n def test_element(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element):\n yield element + 10\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(\n TestDoFn())\n assert_that(pcoll, equal_to([11, 12]))\n\n def test_side_input_no_tag(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, prefix, 
suffix):\n return ['%s-%s-%s' % (prefix, element, suffix)]\n with TestPipeline() as pipeline:\n words_list = ['aa', 'bb', 'cc']\n words = pipeline | 'SomeWords' >> Create(words_list)\n prefix = 'zyx'\n suffix = pipeline | 'SomeString' >> Create(['xyz'])\n result = words | 'DecorateWordsDoFnNoTag' >> ParDo(TestDoFn(),\n prefix, suffix=AsSingleton(suffix))\n assert_that(result, equal_to([('zyx-%s-xyz' % x) for x in\n words_list]))\n\n def test_side_input_tagged(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, prefix, suffix=DoFn.SideInputParam):\n return ['%s-%s-%s' % (prefix, element, suffix)]\n with TestPipeline() as pipeline:\n words_list = ['aa', 'bb', 'cc']\n words = pipeline | 'SomeWords' >> Create(words_list)\n prefix = 'zyx'\n suffix = pipeline | 'SomeString' >> Create(['xyz'])\n result = words | 'DecorateWordsDoFnNoTag' >> ParDo(TestDoFn(),\n prefix, suffix=AsSingleton(suffix))\n assert_that(result, equal_to([('zyx-%s-xyz' % x) for x in\n words_list]))\n\n @pytest.mark.it_validatesrunner\n def test_element_param(self):\n pipeline = TestPipeline()\n input = [1, 2]\n pcoll = pipeline | 'Create' >> Create(input) | 'Ele param' >> Map(\n lambda element=DoFn.ElementParam: element)\n assert_that(pcoll, equal_to(input))\n pipeline.run()\n\n @pytest.mark.it_validatesrunner\n def test_key_param(self):\n pipeline = TestPipeline()\n pcoll = pipeline | 'Create' >> Create([('a', 1), ('b', 2)]\n ) | 'Key param' >> Map(lambda _, key=DoFn.KeyParam: key)\n assert_that(pcoll, equal_to(['a', 'b']))\n pipeline.run()\n\n def test_window_param(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, window=DoFn.WindowParam):\n yield element, (float(window.start), float(window.end))\n with TestPipeline() as pipeline:\n pcoll = pipeline | Create([1, 7]) | Map(lambda x:\n TimestampedValue(x, x)) | WindowInto(windowfn=\n SlidingWindows(10, 5)) | ParDo(TestDoFn())\n assert_that(pcoll, equal_to([(1, (-5, 5)), (1, (0, 10)), (7, (0,\n 10)), (7, (5, 15))]))\n 
pcoll2 = pcoll | 'Again' >> ParDo(TestDoFn())\n assert_that(pcoll2, equal_to([((1, (-5, 5)), (-5, 5)), ((1, (0,\n 10)), (0, 10)), ((7, (0, 10)), (0, 10)), ((7, (5, 15)), (5,\n 15))]), label='doubled windows')\n\n def test_timestamp_param(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, timestamp=DoFn.TimestampParam):\n yield timestamp\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(\n TestDoFn())\n assert_that(pcoll, equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))\n\n def test_timestamp_param_map(self):\n with TestPipeline() as p:\n assert_that(p | Create([1, 2]) | beam.Map(lambda _, t=DoFn.\n TimestampParam: t), equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))\n\n def test_pane_info_param(self):\n with TestPipeline() as p:\n pc = p | Create([(None, None)])\n assert_that(pc | beam.Map(lambda _, p=DoFn.PaneInfoParam: p),\n equal_to([windowed_value.PANE_INFO_UNKNOWN]), label=\n 'CheckUngrouped')\n assert_that(pc | beam.GroupByKey() | beam.Map(lambda _, p=DoFn.\n PaneInfoParam: p), equal_to([windowed_value.PaneInfo(\n is_first=True, is_last=True, timing=windowed_value.\n PaneInfoTiming.ON_TIME, index=0, nonspeculative_index=0)]),\n label='CheckGrouped')\n\n def test_incomparable_default(self):\n\n\n class IncomparableType(object):\n\n def __eq__(self, other):\n raise RuntimeError()\n\n def __ne__(self, other):\n raise RuntimeError()\n\n def __hash__(self):\n raise RuntimeError()\n with TestPipeline() as pipeline:\n pcoll = pipeline | beam.Create([None]) | Map(lambda e, x=\n IncomparableType(): (e, type(x).__name__))\n assert_that(pcoll, equal_to([(None, 'IncomparableType')]))\n\n\nclass Bacon(PipelineOptions):\n\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_argument('--slices', type=int)\n\n\nclass Eggs(PipelineOptions):\n\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_argument('--style', default='scrambled')\n\n\nclass Breakfast(Bacon, Eggs):\n pass\n\n\nclass 
PipelineOptionsTest(unittest.TestCase):\n\n def test_flag_parsing(self):\n options = Breakfast(['--slices=3', '--style=sunny side up',\n '--ignored'])\n self.assertEqual(3, options.slices)\n self.assertEqual('sunny side up', options.style)\n\n def test_keyword_parsing(self):\n options = Breakfast(['--slices=3', '--style=sunny side up',\n '--ignored'], slices=10)\n self.assertEqual(10, options.slices)\n self.assertEqual('sunny side up', options.style)\n\n def test_attribute_setting(self):\n options = Breakfast(slices=10)\n self.assertEqual(10, options.slices)\n options.slices = 20\n self.assertEqual(20, options.slices)\n\n def test_view_as(self):\n generic_options = PipelineOptions(['--slices=3'])\n self.assertEqual(3, generic_options.view_as(Bacon).slices)\n self.assertEqual(3, generic_options.view_as(Breakfast).slices)\n generic_options.view_as(Breakfast).slices = 10\n self.assertEqual(10, generic_options.view_as(Bacon).slices)\n with self.assertRaises(AttributeError):\n generic_options.slices\n with self.assertRaises(AttributeError):\n generic_options.view_as(Eggs).slices\n\n def test_defaults(self):\n options = Breakfast(['--slices=3'])\n self.assertEqual(3, options.slices)\n self.assertEqual('scrambled', options.style)\n\n def test_dir(self):\n options = Breakfast()\n self.assertEqual({'from_dictionary', 'get_all_options', 'slices',\n 'style', 'view_as', 'display_data'}, {attr for attr in dir(\n options) if not attr.startswith('_') and attr != 'next'})\n self.assertEqual({'from_dictionary', 'get_all_options', 'style',\n 'view_as', 'display_data'}, {attr for attr in dir(options.\n view_as(Eggs)) if not attr.startswith('_') and attr != 'next'})\n\n\nclass RunnerApiTest(unittest.TestCase):\n\n def test_parent_pointer(self):\n\n\n class MyPTransform(beam.PTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n p = beam.Pipeline()\n p | MyPTransform()\n p = Pipeline.from_runner_api(Pipeline.to_runner_api(p,\n use_fake_coders=True), None, 
None)\n self.assertIsNotNone(p.transforms_stack[0].parts[0].parent)\n self.assertEqual(p.transforms_stack[0].parts[0].parent, p.\n transforms_stack[0])\n\n def test_requirements(self):\n p = beam.Pipeline()\n _ = p | beam.Create([]) | beam.ParDo(lambda x, finalize=beam.DoFn.\n BundleFinalizerParam: None)\n proto = p.to_runner_api()\n self.assertTrue(common_urns.requirements.\n REQUIRES_BUNDLE_FINALIZATION.urn, proto.requirements)\n\n def test_annotations(self):\n some_proto = BytesCoder().to_runner_api(None)\n\n\n class EmptyTransform(beam.PTransform):\n\n def expand(self, pcoll):\n return pcoll\n\n def annotations(self):\n return {'foo': 'some_string'}\n\n\n class NonEmptyTransform(beam.PTransform):\n\n def expand(self, pcoll):\n return pcoll | beam.Map(lambda x: x)\n\n def annotations(self):\n return {'foo': b'some_bytes', 'proto': some_proto}\n p = beam.Pipeline()\n _ = p | beam.Create([]) | EmptyTransform() | NonEmptyTransform()\n proto = p.to_runner_api()\n seen = 0\n for transform in proto.components.transforms.values():\n if transform.unique_name == 'EmptyTransform':\n seen += 1\n self.assertEqual(transform.annotations['foo'], b'some_string')\n elif transform.unique_name == 'NonEmptyTransform':\n seen += 1\n self.assertEqual(transform.annotations['foo'], b'some_bytes')\n self.assertEqual(transform.annotations['proto'], some_proto\n .SerializeToString())\n self.assertEqual(seen, 2)\n\n def test_transform_ids(self):\n\n\n class MyPTransform(beam.PTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n p = beam.Pipeline()\n p | MyPTransform()\n runner_api_proto = Pipeline.to_runner_api(p)\n for transform_id in runner_api_proto.components.transforms:\n self.assertRegex(transform_id, '[a-zA-Z0-9-_]+')\n\n def test_input_names(self):\n\n\n class MyPTransform(beam.PTransform):\n\n def expand(self, pcolls):\n return pcolls.values() | beam.Flatten()\n p = beam.Pipeline()\n input_names = set('ABC')\n inputs = {x: (p | x >> 
beam.Create([x])) for x in input_names}\n inputs | MyPTransform()\n runner_api_proto = Pipeline.to_runner_api(p)\n for transform_proto in runner_api_proto.components.transforms.values():\n if transform_proto.unique_name == 'MyPTransform':\n self.assertEqual(set(transform_proto.inputs.keys()),\n input_names)\n break\n else:\n self.fail('Unable to find transform.')\n\n def test_display_data(self):\n\n\n class MyParentTransform(beam.PTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n\n def display_data(self):\n parent_dd = super().display_data()\n parent_dd['p_dd_string'] = DisplayDataItem('p_dd_string_value',\n label='p_dd_string_label')\n parent_dd['p_dd_string_2'] = DisplayDataItem(\n 'p_dd_string_value_2')\n parent_dd['p_dd_bool'] = DisplayDataItem(True, label=\n 'p_dd_bool_label')\n parent_dd['p_dd_int'] = DisplayDataItem(1, label=\n 'p_dd_int_label')\n return parent_dd\n\n\n class MyPTransform(MyParentTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n\n def display_data(self):\n parent_dd = super().display_data()\n parent_dd['dd_string'] = DisplayDataItem('dd_string_value',\n label='dd_string_label')\n parent_dd['dd_string_2'] = DisplayDataItem('dd_string_value_2')\n parent_dd['dd_bool'] = DisplayDataItem(False, label=\n 'dd_bool_label')\n parent_dd['dd_double'] = DisplayDataItem(1.1, label=\n 'dd_double_label')\n return parent_dd\n p = beam.Pipeline()\n p | MyPTransform()\n proto_pipeline = Pipeline.to_runner_api(p, use_fake_coders=True)\n my_transform, = [transform for transform in proto_pipeline.\n components.transforms.values() if transform.unique_name ==\n 'MyPTransform']\n self.assertIsNotNone(my_transform)\n self.assertListEqual(list(my_transform.display_data), [\n beam_runner_api_pb2.DisplayData(urn=common_urns.\n StandardDisplayData.DisplayData.LABELLED.urn, payload=\n beam_runner_api_pb2.LabelledPayload(label='p_dd_string_label',\n key='p_dd_string', namespace=\n 
'apache_beam.pipeline_test.MyPTransform', string_value=\n 'p_dd_string_value').SerializeToString()), beam_runner_api_pb2.\n DisplayData(urn=common_urns.StandardDisplayData.DisplayData.\n LABELLED.urn, payload=beam_runner_api_pb2.LabelledPayload(label\n ='p_dd_string_2', key='p_dd_string_2', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 'p_dd_string_value_2').SerializeToString()),\n beam_runner_api_pb2.DisplayData(urn=common_urns.\n StandardDisplayData.DisplayData.LABELLED.urn, payload=\n beam_runner_api_pb2.LabelledPayload(label='p_dd_bool_label',\n key='p_dd_bool', namespace=\n 'apache_beam.pipeline_test.MyPTransform', bool_value=True).\n SerializeToString()), beam_runner_api_pb2.DisplayData(urn=\n common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(label=\n 'p_dd_int_label', key='p_dd_int', namespace=\n 'apache_beam.pipeline_test.MyPTransform', int_value=1).\n SerializeToString()), beam_runner_api_pb2.DisplayData(urn=\n common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(label=\n 'dd_string_label', key='dd_string', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 'dd_string_value').SerializeToString()), beam_runner_api_pb2.\n DisplayData(urn=common_urns.StandardDisplayData.DisplayData.\n LABELLED.urn, payload=beam_runner_api_pb2.LabelledPayload(label\n ='dd_string_2', key='dd_string_2', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 'dd_string_value_2').SerializeToString()), beam_runner_api_pb2.\n DisplayData(urn=common_urns.StandardDisplayData.DisplayData.\n LABELLED.urn, payload=beam_runner_api_pb2.LabelledPayload(label\n ='dd_bool_label', key='dd_bool', namespace=\n 'apache_beam.pipeline_test.MyPTransform', bool_value=False).\n SerializeToString()), beam_runner_api_pb2.DisplayData(urn=\n common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n 
payload=beam_runner_api_pb2.LabelledPayload(label=\n 'dd_double_label', key='dd_double', namespace=\n 'apache_beam.pipeline_test.MyPTransform', double_value=1.1).\n SerializeToString())])\n\n def test_runner_api_roundtrip_preserves_resource_hints(self):\n p = beam.Pipeline()\n _ = p | beam.Create([1, 2]) | beam.Map(lambda x: x + 1\n ).with_resource_hints(accelerator='gpu')\n self.assertEqual(p.transforms_stack[0].parts[1].transform.\n get_resource_hints(), {common_urns.resource_hints.ACCELERATOR.\n urn: b'gpu'})\n for _ in range(3):\n p = Pipeline.from_runner_api(Pipeline.to_runner_api(p), None, None)\n self.assertEqual(p.transforms_stack[0].parts[1].transform.\n get_resource_hints(), {common_urns.resource_hints.\n ACCELERATOR.urn: b'gpu'})\n\n def test_hints_on_composite_transforms_are_propagated_to_subtransforms(self\n ):\n\n\n class FooHint(ResourceHint):\n urn = 'foo_urn'\n\n\n class BarHint(ResourceHint):\n urn = 'bar_urn'\n\n\n class BazHint(ResourceHint):\n urn = 'baz_urn'\n\n\n class QuxHint(ResourceHint):\n urn = 'qux_urn'\n\n\n class UseMaxValueHint(ResourceHint):\n urn = 'use_max_value_urn'\n\n @classmethod\n def get_merged_value(cls, outer_value, inner_value):\n return ResourceHint._use_max(outer_value, inner_value)\n ResourceHint.register_resource_hint('foo_hint', FooHint)\n ResourceHint.register_resource_hint('bar_hint', BarHint)\n ResourceHint.register_resource_hint('baz_hint', BazHint)\n ResourceHint.register_resource_hint('qux_hint', QuxHint)\n ResourceHint.register_resource_hint('use_max_value_hint',\n UseMaxValueHint)\n\n @beam.ptransform_fn\n def SubTransform(pcoll):\n return pcoll | beam.Map(lambda x: x + 1).with_resource_hints(\n foo_hint='set_on_subtransform', use_max_value_hint='10')\n\n @beam.ptransform_fn\n def CompositeTransform(pcoll):\n return pcoll | beam.Map(lambda x: x * 2) | SubTransform()\n p = beam.Pipeline()\n _ = p | beam.Create([1, 2]) | CompositeTransform().with_resource_hints(\n foo_hint='should_be_overriden_by_subtransform', 
bar_hint=\n 'set_on_composite', baz_hint='set_on_composite',\n use_max_value_hint='100')\n options = PortableOptions([\n '--resource_hint=baz_hint=should_be_overriden_by_composite',\n '--resource_hint=qux_hint=set_via_options',\n '--environment_type=PROCESS',\n '--environment_option=process_command=foo',\n '--sdk_location=container'])\n environment = ProcessEnvironment.from_options(options)\n proto = Pipeline.to_runner_api(p, default_environment=environment)\n for t in proto.components.transforms.values():\n if 'CompositeTransform/SubTransform/Map' in t.unique_name:\n environment = proto.components.environments.get(t.\n environment_id)\n self.assertEqual(environment.resource_hints.get('foo_urn'),\n b'set_on_subtransform')\n self.assertEqual(environment.resource_hints.get('bar_urn'),\n b'set_on_composite')\n self.assertEqual(environment.resource_hints.get('baz_urn'),\n b'set_on_composite')\n self.assertEqual(environment.resource_hints.get('qux_urn'),\n b'set_via_options')\n self.assertEqual(environment.resource_hints.get(\n 'use_max_value_urn'), b'100')\n found = True\n assert found\n\n def test_environments_with_same_resource_hints_are_reused(self):\n\n\n class HintX(ResourceHint):\n urn = 'X_urn'\n\n\n class HintY(ResourceHint):\n urn = 'Y_urn'\n\n\n class HintIsOdd(ResourceHint):\n urn = 'IsOdd_urn'\n ResourceHint.register_resource_hint('X', HintX)\n ResourceHint.register_resource_hint('Y', HintY)\n ResourceHint.register_resource_hint('IsOdd', HintIsOdd)\n p = beam.Pipeline()\n num_iter = 4\n for i in range(num_iter):\n _ = p | f'NoHintCreate_{i}' >> beam.Create([1, 2]\n ) | f'NoHint_{i}' >> beam.Map(lambda x: x + 1)\n _ = p | f'XCreate_{i}' >> beam.Create([1, 2]\n ) | f'HintX_{i}' >> beam.Map(lambda x: x + 1\n ).with_resource_hints(X='X')\n _ = p | f'XYCreate_{i}' >> beam.Create([1, 2]\n ) | f'HintXY_{i}' >> beam.Map(lambda x: x + 1\n ).with_resource_hints(X='X', Y='Y')\n _ = p | f'IsOddCreate_{i}' >> beam.Create([1, 2]\n ) | f'IsOdd_{i}' >> beam.Map(lambda x: x 
+ 1\n ).with_resource_hints(IsOdd=str(i % 2 != 0))\n proto = Pipeline.to_runner_api(p)\n count_x = count_xy = count_is_odd = count_no_hints = 0\n env_ids = set()\n for _, t in proto.components.transforms.items():\n env = proto.components.environments[t.environment_id]\n if t.unique_name.startswith('HintX_'):\n count_x += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {'X_urn': b'X'})\n if t.unique_name.startswith('HintXY_'):\n count_xy += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {'X_urn': b'X',\n 'Y_urn': b'Y'})\n if t.unique_name.startswith('NoHint_'):\n count_no_hints += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {})\n if t.unique_name.startswith('IsOdd_'):\n count_is_odd += 1\n env_ids.add(t.environment_id)\n self.assertTrue(env.resource_hints == {'IsOdd_urn': b'True'\n } or env.resource_hints == {'IsOdd_urn': b'False'})\n assert count_x == count_is_odd == count_xy == count_no_hints == num_iter\n assert num_iter > 1\n self.assertEqual(len(env_ids), 5)\n\n def test_multiple_application_of_the_same_transform_set_different_hints(\n self):\n\n\n class FooHint(ResourceHint):\n urn = 'foo_urn'\n\n\n class UseMaxValueHint(ResourceHint):\n urn = 'use_max_value_urn'\n\n @classmethod\n def get_merged_value(cls, outer_value, inner_value):\n return ResourceHint._use_max(outer_value, inner_value)\n ResourceHint.register_resource_hint('foo_hint', FooHint)\n ResourceHint.register_resource_hint('use_max_value_hint',\n UseMaxValueHint)\n\n @beam.ptransform_fn\n def SubTransform(pcoll):\n return pcoll | beam.Map(lambda x: x + 1)\n\n @beam.ptransform_fn\n def CompositeTransform(pcoll):\n sub = SubTransform()\n return pcoll | 'first' >> sub.with_resource_hints(foo_hint=\n 'first_application') | 'second' >> sub.with_resource_hints(\n foo_hint='second_application')\n p = beam.Pipeline()\n _ = p | beam.Create([1, 2]) | CompositeTransform()\n proto = Pipeline.to_runner_api(p)\n count = 0\n for t in 
proto.components.transforms.values():\n if 'CompositeTransform/first/Map' in t.unique_name:\n environment = proto.components.environments.get(t.\n environment_id)\n self.assertEqual(b'first_application', environment.\n resource_hints.get('foo_urn'))\n count += 1\n if 'CompositeTransform/second/Map' in t.unique_name:\n environment = proto.components.environments.get(t.\n environment_id)\n self.assertEqual(b'second_application', environment.\n resource_hints.get('foo_urn'))\n count += 1\n assert count == 2\n\n def test_environments_are_deduplicated(self):\n\n def file_artifact(path, hash, staged_name):\n return beam_runner_api_pb2.ArtifactInformation(type_urn=\n common_urns.artifact_types.FILE.urn, type_payload=\n beam_runner_api_pb2.ArtifactFilePayload(path=path, sha256=\n hash).SerializeToString(), role_urn=common_urns.\n artifact_roles.STAGING_TO.urn, role_payload=\n beam_runner_api_pb2.ArtifactStagingToRolePayload(\n staged_name=staged_name).SerializeToString())\n proto = beam_runner_api_pb2.Pipeline(components=beam_runner_api_pb2\n .Components(transforms={f'transform{ix}': beam_runner_api_pb2.\n PTransform(environment_id=f'e{ix}') for ix in range(8)},\n environments={'e1': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest')]), 'e2':\n beam_runner_api_pb2.Environment(dependencies=[file_artifact(\n 'a2', 'x', 'dest')]), 'e3': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a3', 'y', 'dest')]), 'e4':\n beam_runner_api_pb2.Environment(dependencies=[file_artifact(\n 'a4', 'y', 'dest2')]), 'e5': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest'), file_artifact(\n 'b1', 'xb', 'destB')]), 'e6': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a2', 'x', 'dest'), file_artifact(\n 'b2', 'xb', 'destB')]), 'e7': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest'), file_artifact(\n 'b2', 'y', 'destB')]), 'e0': beam_runner_api_pb2.Environment(\n 
resource_hints={'hint': b'value'}, dependencies=[file_artifact(\n 'a1', 'x', 'dest')])}))\n Pipeline.merge_compatible_environments(proto)\n self.assertEqual(proto.components.transforms['transform1'].\n environment_id, proto.components.transforms['transform2'].\n environment_id)\n self.assertEqual(proto.components.transforms['transform5'].\n environment_id, proto.components.transforms['transform6'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform1'].\n environment_id, proto.components.transforms['transform3'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform4'].\n environment_id, proto.components.transforms['transform3'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform6'].\n environment_id, proto.components.transforms['transform7'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform1'].\n environment_id, proto.components.transforms['transform0'].\n environment_id)\n self.assertEqual(len(proto.components.environments), 6)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass AddWithProductDoFn(beam.DoFn):\n\n def process(self, input, a, b):\n yield input + a * b\n\n\nclass AddThenMultiplyDoFn(beam.DoFn):\n\n def process(self, input, a, b):\n yield (input + a) * b\n\n\nclass AddThenMultiply(beam.PTransform):\n\n def expand(self, pvalues):\n return pvalues[0] | beam.ParDo(AddThenMultiplyDoFn(), AsSingleton(\n pvalues[1]), AsSingleton(pvalues[2]))\n\n\nclass PipelineTest(unittest.TestCase):\n\n @staticmethod\n def custom_callable(pcoll):\n return pcoll | '+1' >> FlatMap(lambda x: [x + 1])\n\n\n class CustomTransform(PTransform):\n\n def expand(self, pcoll):\n return pcoll | '+1' >> FlatMap(lambda x: [x + 1])\n\n\n class Visitor(PipelineVisitor):\n\n def __init__(self, visited):\n self.visited = visited\n self.enter_composite = []\n self.leave_composite = []\n\n def visit_value(self, value, _):\n self.visited.append(value)\n\n def enter_composite_transform(self, transform_node):\n self.enter_composite.append(transform_node)\n\n def leave_composite_transform(self, transform_node):\n self.leave_composite.append(transform_node)\n\n def test_create(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'label1' >> Create([1, 2, 3])\n assert_that(pcoll, equal_to([1, 2, 3]))\n pcoll2 = pipeline | 'label2' >> Create(iter((4, 5, 6)))\n pcoll3 = pcoll2 | 'do' >> FlatMap(lambda x: [x + 10])\n assert_that(pcoll3, equal_to([14, 15, 16]), label='pcoll3')\n\n def test_flatmap_builtin(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'label1' >> Create([1, 2, 3])\n assert_that(pcoll, equal_to([1, 2, 3]))\n pcoll2 = pcoll | 'do' >> FlatMap(lambda x: [x + 10])\n assert_that(pcoll2, equal_to([11, 12, 13]), label='pcoll2')\n pcoll3 = pcoll2 | 'm1' >> Map(lambda x: [x, 12])\n assert_that(pcoll3, equal_to([[11, 12], [12, 12], [13, 12]]),\n label='pcoll3')\n pcoll4 = pcoll3 | 'do2' >> FlatMap(set)\n assert_that(pcoll4, equal_to([11, 12, 12, 12, 13]), label='pcoll4')\n\n def test_maptuple_builtin(self):\n 
with TestPipeline() as pipeline:\n pcoll = pipeline | Create([('e1', 'e2')])\n side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create([\n 's1']))\n side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create([\n 's2']))\n fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (e1,\n e2, t, s1, s2)\n assert_that(pcoll | 'NoSides' >> beam.core.MapTuple(fn),\n equal_to([('e1', 'e2', MIN_TIMESTAMP, None, None)]), label=\n 'NoSidesCheck')\n assert_that(pcoll | 'StaticSides' >> beam.core.MapTuple(fn,\n 's1', 's2'), equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1',\n 's2')]), label='StaticSidesCheck')\n assert_that(pcoll | 'DynamicSides' >> beam.core.MapTuple(fn,\n side1, side2), equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1',\n 's2')]), label='DynamicSidesCheck')\n assert_that(pcoll | 'MixedSides' >> beam.core.MapTuple(fn, s2=\n side2), equal_to([('e1', 'e2', MIN_TIMESTAMP, None, 's2')]),\n label='MixedSidesCheck')\n\n def test_flatmaptuple_builtin(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | Create([('e1', 'e2')])\n side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create([\n 's1']))\n side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create([\n 's2']))\n fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (e1,\n e2, t, s1, s2)\n assert_that(pcoll | 'NoSides' >> beam.core.FlatMapTuple(fn),\n equal_to(['e1', 'e2', MIN_TIMESTAMP, None, None]), label=\n 'NoSidesCheck')\n assert_that(pcoll | 'StaticSides' >> beam.core.FlatMapTuple(fn,\n 's1', 's2'), equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1',\n 's2']), label='StaticSidesCheck')\n assert_that(pcoll | 'DynamicSides' >> beam.core.FlatMapTuple(fn,\n side1, side2), equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1',\n 's2']), label='DynamicSidesCheck')\n assert_that(pcoll | 'MixedSides' >> beam.core.FlatMapTuple(fn,\n s2=side2), equal_to(['e1', 'e2', MIN_TIMESTAMP, None, 's2']\n ), label='MixedSidesCheck')\n\n def test_create_singleton_pcollection(self):\n with TestPipeline() as pipeline:\n pcoll = 
pipeline | 'label' >> Create([[1, 2, 3]])\n assert_that(pcoll, equal_to([[1, 2, 3]]))\n\n def test_visit_entire_graph(self):\n pipeline = Pipeline()\n pcoll1 = pipeline | 'pcoll' >> beam.Impulse()\n pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])\n pcoll4 = pcoll2 | 'do3' >> FlatMap(lambda x: [x + 1])\n transform = PipelineTest.CustomTransform()\n pcoll5 = pcoll4 | transform\n visitor = PipelineTest.Visitor(visited=[])\n pipeline.visit(visitor)\n self.assertEqual({pcoll1, pcoll2, pcoll3, pcoll4, pcoll5}, set(\n visitor.visited))\n self.assertEqual(set(visitor.enter_composite), set(visitor.\n leave_composite))\n self.assertEqual(2, len(visitor.enter_composite))\n self.assertEqual(visitor.enter_composite[1].transform, transform)\n self.assertEqual(visitor.leave_composite[0].transform, transform)\n\n def test_apply_custom_transform(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'pcoll' >> Create([1, 2, 3])\n result = pcoll | PipelineTest.CustomTransform()\n assert_that(result, equal_to([2, 3, 4]))\n\n def test_reuse_custom_transform_instance(self):\n pipeline = Pipeline()\n pcoll1 = pipeline | 'pcoll1' >> Create([1, 2, 3])\n pcoll2 = pipeline | 'pcoll2' >> Create([4, 5, 6])\n transform = PipelineTest.CustomTransform()\n pcoll1 | transform\n with self.assertRaises(RuntimeError) as cm:\n pipeline.apply(transform, pcoll2)\n self.assertEqual(cm.exception.args[0],\n 'A transform with label \"CustomTransform\" already exists in the pipeline. 
To apply a transform with a specified label write pvalue | \"label\" >> transform'\n )\n\n def test_reuse_cloned_custom_transform_instance(self):\n with TestPipeline() as pipeline:\n pcoll1 = pipeline | 'pc1' >> Create([1, 2, 3])\n pcoll2 = pipeline | 'pc2' >> Create([4, 5, 6])\n transform = PipelineTest.CustomTransform()\n result1 = pcoll1 | transform\n result2 = pcoll2 | 'new_label' >> transform\n assert_that(result1, equal_to([2, 3, 4]), label='r1')\n assert_that(result2, equal_to([5, 6, 7]), label='r2')\n\n def test_transform_no_super_init(self):\n\n\n class AddSuffix(PTransform):\n\n def __init__(self, suffix):\n self.suffix = suffix\n\n def expand(self, pcoll):\n return pcoll | Map(lambda x: x + self.suffix)\n self.assertEqual(['a-x', 'b-x', 'c-x'], sorted(['a', 'b', 'c'] | \n 'AddSuffix' >> AddSuffix('-x')))\n\n @unittest.skip('Fails on some platforms with new urllib3.')\n def test_memory_usage(self):\n try:\n import resource\n except ImportError:\n self.skipTest('resource module not available.')\n if platform.mac_ver()[0]:\n self.skipTest('ru_maxrss is not in standard units.')\n\n def get_memory_usage_in_bytes():\n return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 2 ** 10\n\n def check_memory(value, memory_threshold):\n memory_usage = get_memory_usage_in_bytes()\n if memory_usage > memory_threshold:\n raise RuntimeError('High memory usage: %d > %d' % (\n memory_usage, memory_threshold))\n return value\n len_elements = 1000000\n num_elements = 10\n num_maps = 100\n with TestPipeline(runner='BundleBasedDirectRunner') as pipeline:\n memory_threshold = get_memory_usage_in_bytes(\n ) + 5 * len_elements * num_elements\n memory_threshold += 10 * 2 ** 20\n biglist = pipeline | 'oom:create' >> Create(['x' * len_elements\n ] * num_elements)\n for i in range(num_maps):\n biglist = biglist | 'oom:addone-%d' % i >> Map(lambda x: x +\n 'y')\n result = biglist | 'oom:check' >> Map(check_memory,\n memory_threshold)\n assert_that(result, equal_to(['x' * 
len_elements + 'y' *\n num_maps] * num_elements))\n\n def test_aggregator_empty_input(self):\n actual = [] | CombineGlobally(max).without_defaults()\n self.assertEqual(actual, [])\n\n def test_pipeline_as_context(self):\n\n def raise_exception(exn):\n raise exn\n with self.assertRaises(ValueError):\n with Pipeline() as p:\n p | Create([ValueError('msg')]) | Map(raise_exception)\n\n def test_ptransform_overrides(self):\n\n\n class MyParDoOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, DoubleParDo)\n\n def get_replacement_transform_for_applied_ptransform(self,\n applied_ptransform):\n ptransform = applied_ptransform.transform\n if isinstance(ptransform, DoubleParDo):\n return TripleParDo()\n raise ValueError('Unsupported type of transform: %r' %\n ptransform)\n p = Pipeline()\n pcoll = p | beam.Create([1, 2, 3]) | 'Multiply' >> DoubleParDo()\n assert_that(pcoll, equal_to([3, 6, 9]))\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_type_hints(self):\n\n\n class NoTypeHintOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, DoubleParDo)\n\n def get_replacement_transform_for_applied_ptransform(self,\n applied_ptransform):\n return ToStringParDo()\n\n\n class WithTypeHintOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, DoubleParDo)\n\n def get_replacement_transform_for_applied_ptransform(self,\n applied_ptransform):\n return ToStringParDo().with_input_types(int).with_output_types(\n str)\n for override, expected_type in [(NoTypeHintOverride(), int), (\n WithTypeHintOverride(), str)]:\n p = TestPipeline()\n pcoll = p | beam.Create([1, 2, 3]) | 'Operate' >> DoubleParDo(\n ) | 'NoOp' >> beam.Map(lambda x: x)\n p.replace_all([override])\n self.assertEqual(pcoll.producer.inputs[0].element_type,\n expected_type)\n\n def 
test_ptransform_override_multiple_inputs(self):\n\n\n class MyParDoOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform,\n FlattenAndDouble)\n\n def get_replacement_transform(self, applied_ptransform):\n return FlattenAndTriple()\n p = Pipeline()\n pcoll1 = p | 'pc1' >> beam.Create([1, 2, 3])\n pcoll2 = p | 'pc2' >> beam.Create([4, 5, 6])\n pcoll3 = (pcoll1, pcoll2) | 'FlattenAndMultiply' >> FlattenAndDouble()\n assert_that(pcoll3, equal_to([3, 6, 9, 12, 15, 18]))\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_side_inputs(self):\n\n\n class MyParDoOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, ParDo\n ) and isinstance(applied_ptransform.transform.fn,\n AddWithProductDoFn)\n\n def get_replacement_transform(self, transform):\n return AddThenMultiply()\n p = Pipeline()\n pcoll1 = p | 'pc1' >> beam.Create([2])\n pcoll2 = p | 'pc2' >> beam.Create([3])\n pcoll3 = p | 'pc3' >> beam.Create([4, 5, 6])\n result = pcoll3 | 'Operate' >> beam.ParDo(AddWithProductDoFn(),\n AsSingleton(pcoll1), AsSingleton(pcoll2))\n assert_that(result, equal_to([18, 21, 24]))\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_replacement_inputs(self):\n\n\n class MyParDoOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, ParDo\n ) and isinstance(applied_ptransform.transform.fn,\n AddWithProductDoFn)\n\n def get_replacement_transform(self, transform):\n return AddThenMultiply()\n\n def get_replacement_inputs(self, applied_ptransform):\n assert len(applied_ptransform.inputs) == 1\n assert len(applied_ptransform.side_inputs) == 2\n return applied_ptransform.inputs[0\n ], applied_ptransform.side_inputs[1\n ].pvalue, applied_ptransform.side_inputs[0].pvalue\n p = Pipeline()\n pcoll1 = p | 'pc1' >> beam.Create([2])\n pcoll2 = p 
| 'pc2' >> beam.Create([3])\n pcoll3 = p | 'pc3' >> beam.Create([4, 5, 6])\n result = pcoll3 | 'Operate' >> beam.ParDo(AddWithProductDoFn(),\n AsSingleton(pcoll1), AsSingleton(pcoll2))\n assert_that(result, equal_to([14, 16, 18]))\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_multiple_outputs(self):\n\n\n class MultiOutputComposite(PTransform):\n\n def __init__(self):\n self.output_tags = set()\n\n def expand(self, pcoll):\n\n def mux_input(x):\n x = x * 2\n if isinstance(x, int):\n yield TaggedOutput('numbers', x)\n else:\n yield TaggedOutput('letters', x)\n multi = pcoll | 'MyReplacement' >> beam.ParDo(mux_input\n ).with_outputs()\n letters = multi.letters | 'LettersComposite' >> beam.Map(lambda\n x: x * 3)\n numbers = multi.numbers | 'NumbersComposite' >> beam.Map(lambda\n x: x * 5)\n return {'letters': letters, 'numbers': numbers}\n\n\n class MultiOutputOverride(PTransformOverride):\n\n def matches(self, applied_ptransform):\n return applied_ptransform.full_label == 'MyMultiOutput'\n\n def get_replacement_transform_for_applied_ptransform(self,\n applied_ptransform):\n return MultiOutputComposite()\n\n def mux_input(x):\n if isinstance(x, int):\n yield TaggedOutput('numbers', x)\n else:\n yield TaggedOutput('letters', x)\n with TestPipeline() as p:\n multi = p | beam.Create([1, 2, 3, 'a', 'b', 'c']\n ) | 'MyMultiOutput' >> beam.ParDo(mux_input).with_outputs()\n letters = multi.letters | 'MyLetters' >> beam.Map(lambda x: x)\n numbers = multi.numbers | 'MyNumbers' >> beam.Map(lambda x: x)\n assert_that(letters, equal_to(['a' * 2 * 3, 'b' * 2 * 3, 'c' * \n 2 * 3]), label='assert letters')\n assert_that(numbers, equal_to([1 * 2 * 5, 2 * 2 * 5, 3 * 2 * 5]\n ), label='assert numbers')\n p.replace_all([MultiOutputOverride()])\n visitor = PipelineTest.Visitor(visited=[])\n p.visit(visitor)\n pcollections = visitor.visited\n composites = visitor.enter_composite\n self.assertIn(MultiOutputComposite, [t.transform.__class__ for t in\n 
composites])\n multi_output_composite = list(filter(lambda t: t.transform.\n __class__ == MultiOutputComposite, composites))[0]\n for output in multi_output_composite.outputs.values():\n self.assertIn(output, pcollections)\n self.assertNotIn(multi[None], visitor.visited)\n self.assertNotIn(multi.letters, visitor.visited)\n self.assertNotIn(multi.numbers, visitor.visited)\n\n def test_kv_ptransform_honor_type_hints(self):\n\n\n class StatefulDoFn(DoFn):\n BYTES_STATE = BagStateSpec('bytes', BytesCoder())\n\n def return_recursive(self, count):\n if count == 0:\n return ['some string']\n else:\n self.return_recursive(count - 1)\n\n def process(self, element, counter=DoFn.StateParam(BYTES_STATE)):\n return self.return_recursive(1)\n with TestPipeline() as p:\n pcoll = p | beam.Create([(1, 1), (2, 2), (3, 3)]\n ) | beam.GroupByKey() | beam.ParDo(StatefulDoFn())\n self.assertEqual(pcoll.element_type, typehints.Any)\n with TestPipeline() as p:\n pcoll = p | beam.Create([(1, 1), (2, 2), (3, 3)]\n ) | beam.GroupByKey() | beam.ParDo(StatefulDoFn()\n ).with_output_types(str)\n self.assertEqual(pcoll.element_type, str)\n\n def test_track_pcoll_unbounded(self):\n pipeline = TestPipeline()\n pcoll1 = pipeline | 'read' >> Read(FakeUnboundedSource())\n pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])\n self.assertIs(pcoll1.is_bounded, False)\n self.assertIs(pcoll2.is_bounded, False)\n self.assertIs(pcoll3.is_bounded, False)\n\n def test_track_pcoll_bounded(self):\n pipeline = TestPipeline()\n pcoll1 = pipeline | 'label1' >> Create([1, 2, 3])\n pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])\n self.assertIs(pcoll1.is_bounded, True)\n self.assertIs(pcoll2.is_bounded, True)\n self.assertIs(pcoll3.is_bounded, True)\n\n def test_track_pcoll_bounded_flatten(self):\n pipeline = TestPipeline()\n pcoll1_a = pipeline | 'label_a' >> Create([1, 2, 3])\n pcoll2_a = 
pcoll1_a | 'do_a' >> FlatMap(lambda x: [x + 1])\n pcoll1_b = pipeline | 'label_b' >> Create([1, 2, 3])\n pcoll2_b = pcoll1_b | 'do_b' >> FlatMap(lambda x: [x + 1])\n merged = (pcoll2_a, pcoll2_b) | beam.Flatten()\n self.assertIs(pcoll1_a.is_bounded, True)\n self.assertIs(pcoll2_a.is_bounded, True)\n self.assertIs(pcoll1_b.is_bounded, True)\n self.assertIs(pcoll2_b.is_bounded, True)\n self.assertIs(merged.is_bounded, True)\n\n def test_track_pcoll_unbounded_flatten(self):\n pipeline = TestPipeline()\n pcoll1_bounded = pipeline | 'label1' >> Create([1, 2, 3])\n pcoll2_bounded = pcoll1_bounded | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll1_unbounded = pipeline | 'read' >> Read(FakeUnboundedSource())\n pcoll2_unbounded = pcoll1_unbounded | 'do2' >> FlatMap(lambda x: [x +\n 1])\n merged = (pcoll2_bounded, pcoll2_unbounded) | beam.Flatten()\n self.assertIs(pcoll1_bounded.is_bounded, True)\n self.assertIs(pcoll2_bounded.is_bounded, True)\n self.assertIs(pcoll1_unbounded.is_bounded, False)\n self.assertIs(pcoll2_unbounded.is_bounded, False)\n self.assertIs(merged.is_bounded, False)\n\n def test_incompatible_submission_and_runtime_envs_fail_pipeline(self):\n with mock.patch(\n 'apache_beam.transforms.environments.sdk_base_version_capability'\n ) as base_version:\n base_version.side_effect = [\n f'beam:version:sdk_base:apache/beam_python3.5_sdk:2.{i}.0' for\n i in range(100)]\n with self.assertRaisesRegex(RuntimeError,\n 'Pipeline construction environment and pipeline runtime environment are not compatible.'\n ):\n with TestPipeline() as p:\n _ = p | Create([None])\n\n\nclass DoFnTest(unittest.TestCase):\n\n def test_element(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element):\n yield element + 10\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(\n TestDoFn())\n assert_that(pcoll, equal_to([11, 12]))\n\n def test_side_input_no_tag(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, prefix, 
suffix):\n return ['%s-%s-%s' % (prefix, element, suffix)]\n with TestPipeline() as pipeline:\n words_list = ['aa', 'bb', 'cc']\n words = pipeline | 'SomeWords' >> Create(words_list)\n prefix = 'zyx'\n suffix = pipeline | 'SomeString' >> Create(['xyz'])\n result = words | 'DecorateWordsDoFnNoTag' >> ParDo(TestDoFn(),\n prefix, suffix=AsSingleton(suffix))\n assert_that(result, equal_to([('zyx-%s-xyz' % x) for x in\n words_list]))\n\n def test_side_input_tagged(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, prefix, suffix=DoFn.SideInputParam):\n return ['%s-%s-%s' % (prefix, element, suffix)]\n with TestPipeline() as pipeline:\n words_list = ['aa', 'bb', 'cc']\n words = pipeline | 'SomeWords' >> Create(words_list)\n prefix = 'zyx'\n suffix = pipeline | 'SomeString' >> Create(['xyz'])\n result = words | 'DecorateWordsDoFnNoTag' >> ParDo(TestDoFn(),\n prefix, suffix=AsSingleton(suffix))\n assert_that(result, equal_to([('zyx-%s-xyz' % x) for x in\n words_list]))\n\n @pytest.mark.it_validatesrunner\n def test_element_param(self):\n pipeline = TestPipeline()\n input = [1, 2]\n pcoll = pipeline | 'Create' >> Create(input) | 'Ele param' >> Map(\n lambda element=DoFn.ElementParam: element)\n assert_that(pcoll, equal_to(input))\n pipeline.run()\n\n @pytest.mark.it_validatesrunner\n def test_key_param(self):\n pipeline = TestPipeline()\n pcoll = pipeline | 'Create' >> Create([('a', 1), ('b', 2)]\n ) | 'Key param' >> Map(lambda _, key=DoFn.KeyParam: key)\n assert_that(pcoll, equal_to(['a', 'b']))\n pipeline.run()\n\n def test_window_param(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, window=DoFn.WindowParam):\n yield element, (float(window.start), float(window.end))\n with TestPipeline() as pipeline:\n pcoll = pipeline | Create([1, 7]) | Map(lambda x:\n TimestampedValue(x, x)) | WindowInto(windowfn=\n SlidingWindows(10, 5)) | ParDo(TestDoFn())\n assert_that(pcoll, equal_to([(1, (-5, 5)), (1, (0, 10)), (7, (0,\n 10)), (7, (5, 15))]))\n 
pcoll2 = pcoll | 'Again' >> ParDo(TestDoFn())\n assert_that(pcoll2, equal_to([((1, (-5, 5)), (-5, 5)), ((1, (0,\n 10)), (0, 10)), ((7, (0, 10)), (0, 10)), ((7, (5, 15)), (5,\n 15))]), label='doubled windows')\n\n def test_timestamp_param(self):\n\n\n class TestDoFn(DoFn):\n\n def process(self, element, timestamp=DoFn.TimestampParam):\n yield timestamp\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(\n TestDoFn())\n assert_that(pcoll, equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))\n\n def test_timestamp_param_map(self):\n with TestPipeline() as p:\n assert_that(p | Create([1, 2]) | beam.Map(lambda _, t=DoFn.\n TimestampParam: t), equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))\n\n def test_pane_info_param(self):\n with TestPipeline() as p:\n pc = p | Create([(None, None)])\n assert_that(pc | beam.Map(lambda _, p=DoFn.PaneInfoParam: p),\n equal_to([windowed_value.PANE_INFO_UNKNOWN]), label=\n 'CheckUngrouped')\n assert_that(pc | beam.GroupByKey() | beam.Map(lambda _, p=DoFn.\n PaneInfoParam: p), equal_to([windowed_value.PaneInfo(\n is_first=True, is_last=True, timing=windowed_value.\n PaneInfoTiming.ON_TIME, index=0, nonspeculative_index=0)]),\n label='CheckGrouped')\n\n def test_incomparable_default(self):\n\n\n class IncomparableType(object):\n\n def __eq__(self, other):\n raise RuntimeError()\n\n def __ne__(self, other):\n raise RuntimeError()\n\n def __hash__(self):\n raise RuntimeError()\n with TestPipeline() as pipeline:\n pcoll = pipeline | beam.Create([None]) | Map(lambda e, x=\n IncomparableType(): (e, type(x).__name__))\n assert_that(pcoll, equal_to([(None, 'IncomparableType')]))\n\n\nclass Bacon(PipelineOptions):\n\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_argument('--slices', type=int)\n\n\nclass Eggs(PipelineOptions):\n\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_argument('--style', default='scrambled')\n\n\nclass Breakfast(Bacon, Eggs):\n pass\n\n\nclass 
PipelineOptionsTest(unittest.TestCase):\n\n def test_flag_parsing(self):\n options = Breakfast(['--slices=3', '--style=sunny side up',\n '--ignored'])\n self.assertEqual(3, options.slices)\n self.assertEqual('sunny side up', options.style)\n\n def test_keyword_parsing(self):\n options = Breakfast(['--slices=3', '--style=sunny side up',\n '--ignored'], slices=10)\n self.assertEqual(10, options.slices)\n self.assertEqual('sunny side up', options.style)\n\n def test_attribute_setting(self):\n options = Breakfast(slices=10)\n self.assertEqual(10, options.slices)\n options.slices = 20\n self.assertEqual(20, options.slices)\n\n def test_view_as(self):\n generic_options = PipelineOptions(['--slices=3'])\n self.assertEqual(3, generic_options.view_as(Bacon).slices)\n self.assertEqual(3, generic_options.view_as(Breakfast).slices)\n generic_options.view_as(Breakfast).slices = 10\n self.assertEqual(10, generic_options.view_as(Bacon).slices)\n with self.assertRaises(AttributeError):\n generic_options.slices\n with self.assertRaises(AttributeError):\n generic_options.view_as(Eggs).slices\n\n def test_defaults(self):\n options = Breakfast(['--slices=3'])\n self.assertEqual(3, options.slices)\n self.assertEqual('scrambled', options.style)\n\n def test_dir(self):\n options = Breakfast()\n self.assertEqual({'from_dictionary', 'get_all_options', 'slices',\n 'style', 'view_as', 'display_data'}, {attr for attr in dir(\n options) if not attr.startswith('_') and attr != 'next'})\n self.assertEqual({'from_dictionary', 'get_all_options', 'style',\n 'view_as', 'display_data'}, {attr for attr in dir(options.\n view_as(Eggs)) if not attr.startswith('_') and attr != 'next'})\n\n\nclass RunnerApiTest(unittest.TestCase):\n\n def test_parent_pointer(self):\n\n\n class MyPTransform(beam.PTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n p = beam.Pipeline()\n p | MyPTransform()\n p = Pipeline.from_runner_api(Pipeline.to_runner_api(p,\n use_fake_coders=True), None, 
None)\n self.assertIsNotNone(p.transforms_stack[0].parts[0].parent)\n self.assertEqual(p.transforms_stack[0].parts[0].parent, p.\n transforms_stack[0])\n\n def test_requirements(self):\n p = beam.Pipeline()\n _ = p | beam.Create([]) | beam.ParDo(lambda x, finalize=beam.DoFn.\n BundleFinalizerParam: None)\n proto = p.to_runner_api()\n self.assertTrue(common_urns.requirements.\n REQUIRES_BUNDLE_FINALIZATION.urn, proto.requirements)\n\n def test_annotations(self):\n some_proto = BytesCoder().to_runner_api(None)\n\n\n class EmptyTransform(beam.PTransform):\n\n def expand(self, pcoll):\n return pcoll\n\n def annotations(self):\n return {'foo': 'some_string'}\n\n\n class NonEmptyTransform(beam.PTransform):\n\n def expand(self, pcoll):\n return pcoll | beam.Map(lambda x: x)\n\n def annotations(self):\n return {'foo': b'some_bytes', 'proto': some_proto}\n p = beam.Pipeline()\n _ = p | beam.Create([]) | EmptyTransform() | NonEmptyTransform()\n proto = p.to_runner_api()\n seen = 0\n for transform in proto.components.transforms.values():\n if transform.unique_name == 'EmptyTransform':\n seen += 1\n self.assertEqual(transform.annotations['foo'], b'some_string')\n elif transform.unique_name == 'NonEmptyTransform':\n seen += 1\n self.assertEqual(transform.annotations['foo'], b'some_bytes')\n self.assertEqual(transform.annotations['proto'], some_proto\n .SerializeToString())\n self.assertEqual(seen, 2)\n\n def test_transform_ids(self):\n\n\n class MyPTransform(beam.PTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n p = beam.Pipeline()\n p | MyPTransform()\n runner_api_proto = Pipeline.to_runner_api(p)\n for transform_id in runner_api_proto.components.transforms:\n self.assertRegex(transform_id, '[a-zA-Z0-9-_]+')\n\n def test_input_names(self):\n\n\n class MyPTransform(beam.PTransform):\n\n def expand(self, pcolls):\n return pcolls.values() | beam.Flatten()\n p = beam.Pipeline()\n input_names = set('ABC')\n inputs = {x: (p | x >> 
beam.Create([x])) for x in input_names}\n inputs | MyPTransform()\n runner_api_proto = Pipeline.to_runner_api(p)\n for transform_proto in runner_api_proto.components.transforms.values():\n if transform_proto.unique_name == 'MyPTransform':\n self.assertEqual(set(transform_proto.inputs.keys()),\n input_names)\n break\n else:\n self.fail('Unable to find transform.')\n\n def test_display_data(self):\n\n\n class MyParentTransform(beam.PTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n\n def display_data(self):\n parent_dd = super().display_data()\n parent_dd['p_dd_string'] = DisplayDataItem('p_dd_string_value',\n label='p_dd_string_label')\n parent_dd['p_dd_string_2'] = DisplayDataItem(\n 'p_dd_string_value_2')\n parent_dd['p_dd_bool'] = DisplayDataItem(True, label=\n 'p_dd_bool_label')\n parent_dd['p_dd_int'] = DisplayDataItem(1, label=\n 'p_dd_int_label')\n return parent_dd\n\n\n class MyPTransform(MyParentTransform):\n\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n\n def display_data(self):\n parent_dd = super().display_data()\n parent_dd['dd_string'] = DisplayDataItem('dd_string_value',\n label='dd_string_label')\n parent_dd['dd_string_2'] = DisplayDataItem('dd_string_value_2')\n parent_dd['dd_bool'] = DisplayDataItem(False, label=\n 'dd_bool_label')\n parent_dd['dd_double'] = DisplayDataItem(1.1, label=\n 'dd_double_label')\n return parent_dd\n p = beam.Pipeline()\n p | MyPTransform()\n proto_pipeline = Pipeline.to_runner_api(p, use_fake_coders=True)\n my_transform, = [transform for transform in proto_pipeline.\n components.transforms.values() if transform.unique_name ==\n 'MyPTransform']\n self.assertIsNotNone(my_transform)\n self.assertListEqual(list(my_transform.display_data), [\n beam_runner_api_pb2.DisplayData(urn=common_urns.\n StandardDisplayData.DisplayData.LABELLED.urn, payload=\n beam_runner_api_pb2.LabelledPayload(label='p_dd_string_label',\n key='p_dd_string', namespace=\n 
'apache_beam.pipeline_test.MyPTransform', string_value=\n 'p_dd_string_value').SerializeToString()), beam_runner_api_pb2.\n DisplayData(urn=common_urns.StandardDisplayData.DisplayData.\n LABELLED.urn, payload=beam_runner_api_pb2.LabelledPayload(label\n ='p_dd_string_2', key='p_dd_string_2', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 'p_dd_string_value_2').SerializeToString()),\n beam_runner_api_pb2.DisplayData(urn=common_urns.\n StandardDisplayData.DisplayData.LABELLED.urn, payload=\n beam_runner_api_pb2.LabelledPayload(label='p_dd_bool_label',\n key='p_dd_bool', namespace=\n 'apache_beam.pipeline_test.MyPTransform', bool_value=True).\n SerializeToString()), beam_runner_api_pb2.DisplayData(urn=\n common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(label=\n 'p_dd_int_label', key='p_dd_int', namespace=\n 'apache_beam.pipeline_test.MyPTransform', int_value=1).\n SerializeToString()), beam_runner_api_pb2.DisplayData(urn=\n common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(label=\n 'dd_string_label', key='dd_string', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 'dd_string_value').SerializeToString()), beam_runner_api_pb2.\n DisplayData(urn=common_urns.StandardDisplayData.DisplayData.\n LABELLED.urn, payload=beam_runner_api_pb2.LabelledPayload(label\n ='dd_string_2', key='dd_string_2', namespace=\n 'apache_beam.pipeline_test.MyPTransform', string_value=\n 'dd_string_value_2').SerializeToString()), beam_runner_api_pb2.\n DisplayData(urn=common_urns.StandardDisplayData.DisplayData.\n LABELLED.urn, payload=beam_runner_api_pb2.LabelledPayload(label\n ='dd_bool_label', key='dd_bool', namespace=\n 'apache_beam.pipeline_test.MyPTransform', bool_value=False).\n SerializeToString()), beam_runner_api_pb2.DisplayData(urn=\n common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n 
payload=beam_runner_api_pb2.LabelledPayload(label=\n 'dd_double_label', key='dd_double', namespace=\n 'apache_beam.pipeline_test.MyPTransform', double_value=1.1).\n SerializeToString())])\n\n def test_runner_api_roundtrip_preserves_resource_hints(self):\n p = beam.Pipeline()\n _ = p | beam.Create([1, 2]) | beam.Map(lambda x: x + 1\n ).with_resource_hints(accelerator='gpu')\n self.assertEqual(p.transforms_stack[0].parts[1].transform.\n get_resource_hints(), {common_urns.resource_hints.ACCELERATOR.\n urn: b'gpu'})\n for _ in range(3):\n p = Pipeline.from_runner_api(Pipeline.to_runner_api(p), None, None)\n self.assertEqual(p.transforms_stack[0].parts[1].transform.\n get_resource_hints(), {common_urns.resource_hints.\n ACCELERATOR.urn: b'gpu'})\n\n def test_hints_on_composite_transforms_are_propagated_to_subtransforms(self\n ):\n\n\n class FooHint(ResourceHint):\n urn = 'foo_urn'\n\n\n class BarHint(ResourceHint):\n urn = 'bar_urn'\n\n\n class BazHint(ResourceHint):\n urn = 'baz_urn'\n\n\n class QuxHint(ResourceHint):\n urn = 'qux_urn'\n\n\n class UseMaxValueHint(ResourceHint):\n urn = 'use_max_value_urn'\n\n @classmethod\n def get_merged_value(cls, outer_value, inner_value):\n return ResourceHint._use_max(outer_value, inner_value)\n ResourceHint.register_resource_hint('foo_hint', FooHint)\n ResourceHint.register_resource_hint('bar_hint', BarHint)\n ResourceHint.register_resource_hint('baz_hint', BazHint)\n ResourceHint.register_resource_hint('qux_hint', QuxHint)\n ResourceHint.register_resource_hint('use_max_value_hint',\n UseMaxValueHint)\n\n @beam.ptransform_fn\n def SubTransform(pcoll):\n return pcoll | beam.Map(lambda x: x + 1).with_resource_hints(\n foo_hint='set_on_subtransform', use_max_value_hint='10')\n\n @beam.ptransform_fn\n def CompositeTransform(pcoll):\n return pcoll | beam.Map(lambda x: x * 2) | SubTransform()\n p = beam.Pipeline()\n _ = p | beam.Create([1, 2]) | CompositeTransform().with_resource_hints(\n foo_hint='should_be_overriden_by_subtransform', 
bar_hint=\n 'set_on_composite', baz_hint='set_on_composite',\n use_max_value_hint='100')\n options = PortableOptions([\n '--resource_hint=baz_hint=should_be_overriden_by_composite',\n '--resource_hint=qux_hint=set_via_options',\n '--environment_type=PROCESS',\n '--environment_option=process_command=foo',\n '--sdk_location=container'])\n environment = ProcessEnvironment.from_options(options)\n proto = Pipeline.to_runner_api(p, default_environment=environment)\n for t in proto.components.transforms.values():\n if 'CompositeTransform/SubTransform/Map' in t.unique_name:\n environment = proto.components.environments.get(t.\n environment_id)\n self.assertEqual(environment.resource_hints.get('foo_urn'),\n b'set_on_subtransform')\n self.assertEqual(environment.resource_hints.get('bar_urn'),\n b'set_on_composite')\n self.assertEqual(environment.resource_hints.get('baz_urn'),\n b'set_on_composite')\n self.assertEqual(environment.resource_hints.get('qux_urn'),\n b'set_via_options')\n self.assertEqual(environment.resource_hints.get(\n 'use_max_value_urn'), b'100')\n found = True\n assert found\n\n def test_environments_with_same_resource_hints_are_reused(self):\n\n\n class HintX(ResourceHint):\n urn = 'X_urn'\n\n\n class HintY(ResourceHint):\n urn = 'Y_urn'\n\n\n class HintIsOdd(ResourceHint):\n urn = 'IsOdd_urn'\n ResourceHint.register_resource_hint('X', HintX)\n ResourceHint.register_resource_hint('Y', HintY)\n ResourceHint.register_resource_hint('IsOdd', HintIsOdd)\n p = beam.Pipeline()\n num_iter = 4\n for i in range(num_iter):\n _ = p | f'NoHintCreate_{i}' >> beam.Create([1, 2]\n ) | f'NoHint_{i}' >> beam.Map(lambda x: x + 1)\n _ = p | f'XCreate_{i}' >> beam.Create([1, 2]\n ) | f'HintX_{i}' >> beam.Map(lambda x: x + 1\n ).with_resource_hints(X='X')\n _ = p | f'XYCreate_{i}' >> beam.Create([1, 2]\n ) | f'HintXY_{i}' >> beam.Map(lambda x: x + 1\n ).with_resource_hints(X='X', Y='Y')\n _ = p | f'IsOddCreate_{i}' >> beam.Create([1, 2]\n ) | f'IsOdd_{i}' >> beam.Map(lambda x: x 
+ 1\n ).with_resource_hints(IsOdd=str(i % 2 != 0))\n proto = Pipeline.to_runner_api(p)\n count_x = count_xy = count_is_odd = count_no_hints = 0\n env_ids = set()\n for _, t in proto.components.transforms.items():\n env = proto.components.environments[t.environment_id]\n if t.unique_name.startswith('HintX_'):\n count_x += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {'X_urn': b'X'})\n if t.unique_name.startswith('HintXY_'):\n count_xy += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {'X_urn': b'X',\n 'Y_urn': b'Y'})\n if t.unique_name.startswith('NoHint_'):\n count_no_hints += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {})\n if t.unique_name.startswith('IsOdd_'):\n count_is_odd += 1\n env_ids.add(t.environment_id)\n self.assertTrue(env.resource_hints == {'IsOdd_urn': b'True'\n } or env.resource_hints == {'IsOdd_urn': b'False'})\n assert count_x == count_is_odd == count_xy == count_no_hints == num_iter\n assert num_iter > 1\n self.assertEqual(len(env_ids), 5)\n\n def test_multiple_application_of_the_same_transform_set_different_hints(\n self):\n\n\n class FooHint(ResourceHint):\n urn = 'foo_urn'\n\n\n class UseMaxValueHint(ResourceHint):\n urn = 'use_max_value_urn'\n\n @classmethod\n def get_merged_value(cls, outer_value, inner_value):\n return ResourceHint._use_max(outer_value, inner_value)\n ResourceHint.register_resource_hint('foo_hint', FooHint)\n ResourceHint.register_resource_hint('use_max_value_hint',\n UseMaxValueHint)\n\n @beam.ptransform_fn\n def SubTransform(pcoll):\n return pcoll | beam.Map(lambda x: x + 1)\n\n @beam.ptransform_fn\n def CompositeTransform(pcoll):\n sub = SubTransform()\n return pcoll | 'first' >> sub.with_resource_hints(foo_hint=\n 'first_application') | 'second' >> sub.with_resource_hints(\n foo_hint='second_application')\n p = beam.Pipeline()\n _ = p | beam.Create([1, 2]) | CompositeTransform()\n proto = Pipeline.to_runner_api(p)\n count = 0\n for t in 
proto.components.transforms.values():\n if 'CompositeTransform/first/Map' in t.unique_name:\n environment = proto.components.environments.get(t.\n environment_id)\n self.assertEqual(b'first_application', environment.\n resource_hints.get('foo_urn'))\n count += 1\n if 'CompositeTransform/second/Map' in t.unique_name:\n environment = proto.components.environments.get(t.\n environment_id)\n self.assertEqual(b'second_application', environment.\n resource_hints.get('foo_urn'))\n count += 1\n assert count == 2\n\n def test_environments_are_deduplicated(self):\n\n def file_artifact(path, hash, staged_name):\n return beam_runner_api_pb2.ArtifactInformation(type_urn=\n common_urns.artifact_types.FILE.urn, type_payload=\n beam_runner_api_pb2.ArtifactFilePayload(path=path, sha256=\n hash).SerializeToString(), role_urn=common_urns.\n artifact_roles.STAGING_TO.urn, role_payload=\n beam_runner_api_pb2.ArtifactStagingToRolePayload(\n staged_name=staged_name).SerializeToString())\n proto = beam_runner_api_pb2.Pipeline(components=beam_runner_api_pb2\n .Components(transforms={f'transform{ix}': beam_runner_api_pb2.\n PTransform(environment_id=f'e{ix}') for ix in range(8)},\n environments={'e1': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest')]), 'e2':\n beam_runner_api_pb2.Environment(dependencies=[file_artifact(\n 'a2', 'x', 'dest')]), 'e3': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a3', 'y', 'dest')]), 'e4':\n beam_runner_api_pb2.Environment(dependencies=[file_artifact(\n 'a4', 'y', 'dest2')]), 'e5': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest'), file_artifact(\n 'b1', 'xb', 'destB')]), 'e6': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a2', 'x', 'dest'), file_artifact(\n 'b2', 'xb', 'destB')]), 'e7': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest'), file_artifact(\n 'b2', 'y', 'destB')]), 'e0': beam_runner_api_pb2.Environment(\n 
resource_hints={'hint': b'value'}, dependencies=[file_artifact(\n 'a1', 'x', 'dest')])}))\n Pipeline.merge_compatible_environments(proto)\n self.assertEqual(proto.components.transforms['transform1'].\n environment_id, proto.components.transforms['transform2'].\n environment_id)\n self.assertEqual(proto.components.transforms['transform5'].\n environment_id, proto.components.transforms['transform6'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform1'].\n environment_id, proto.components.transforms['transform3'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform4'].\n environment_id, proto.components.transforms['transform3'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform6'].\n environment_id, proto.components.transforms['transform7'].\n environment_id)\n self.assertNotEqual(proto.components.transforms['transform1'].\n environment_id, proto.components.transforms['transform0'].\n environment_id)\n self.assertEqual(len(proto.components.environments), 6)\n\n\n<mask token>\n",
"step-5": "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Unit tests for the Pipeline class.\"\"\"\n\n# pytype: skip-file\n\nimport copy\nimport platform\nimport unittest\n\nimport mock\nimport pytest\n\nimport apache_beam as beam\nfrom apache_beam import typehints\nfrom apache_beam.coders import BytesCoder\nfrom apache_beam.io import Read\nfrom apache_beam.io.iobase import SourceBase\nfrom apache_beam.options.pipeline_options import PortableOptions\nfrom apache_beam.pipeline import Pipeline\nfrom apache_beam.pipeline import PipelineOptions\nfrom apache_beam.pipeline import PipelineVisitor\nfrom apache_beam.pipeline import PTransformOverride\nfrom apache_beam.portability import common_urns\nfrom apache_beam.portability.api import beam_runner_api_pb2\nfrom apache_beam.pvalue import AsSingleton\nfrom apache_beam.pvalue import TaggedOutput\nfrom apache_beam.testing.test_pipeline import TestPipeline\nfrom apache_beam.testing.util import assert_that\nfrom apache_beam.testing.util import equal_to\nfrom apache_beam.transforms import CombineGlobally\nfrom apache_beam.transforms import Create\nfrom apache_beam.transforms import DoFn\nfrom apache_beam.transforms import FlatMap\nfrom apache_beam.transforms import Map\nfrom 
apache_beam.transforms import ParDo\nfrom apache_beam.transforms import PTransform\nfrom apache_beam.transforms import WindowInto\nfrom apache_beam.transforms.display import DisplayDataItem\nfrom apache_beam.transforms.environments import ProcessEnvironment\nfrom apache_beam.transforms.resources import ResourceHint\nfrom apache_beam.transforms.userstate import BagStateSpec\nfrom apache_beam.transforms.window import SlidingWindows\nfrom apache_beam.transforms.window import TimestampedValue\nfrom apache_beam.utils import windowed_value\nfrom apache_beam.utils.timestamp import MIN_TIMESTAMP\n\n\nclass FakeUnboundedSource(SourceBase):\n \"\"\"Fake unbounded source. Does not work at runtime\"\"\"\n def is_bounded(self):\n return False\n\n\nclass DoubleParDo(beam.PTransform):\n def expand(self, input):\n return input | 'Inner' >> beam.Map(lambda a: a * 2)\n\n def to_runner_api_parameter(self, context):\n return self.to_runner_api_pickled(context)\n\n\nclass TripleParDo(beam.PTransform):\n def expand(self, input):\n # Keeping labels the same intentionally to make sure that there is no label\n # conflict due to replacement.\n return input | 'Inner' >> beam.Map(lambda a: a * 3)\n\n\nclass ToStringParDo(beam.PTransform):\n def expand(self, input):\n # We use copy.copy() here to make sure the typehint mechanism doesn't\n # automatically infer that the output type is str.\n return input | 'Inner' >> beam.Map(lambda a: copy.copy(str(a)))\n\n\nclass FlattenAndDouble(beam.PTransform):\n def expand(self, pcolls):\n return pcolls | beam.Flatten() | 'Double' >> DoubleParDo()\n\n\nclass FlattenAndTriple(beam.PTransform):\n def expand(self, pcolls):\n return pcolls | beam.Flatten() | 'Triple' >> TripleParDo()\n\n\nclass AddWithProductDoFn(beam.DoFn):\n def process(self, input, a, b):\n yield input + a * b\n\n\nclass AddThenMultiplyDoFn(beam.DoFn):\n def process(self, input, a, b):\n yield (input + a) * b\n\n\nclass AddThenMultiply(beam.PTransform):\n def expand(self, pvalues):\n 
return pvalues[0] | beam.ParDo(\n AddThenMultiplyDoFn(), AsSingleton(pvalues[1]), AsSingleton(pvalues[2]))\n\n\nclass PipelineTest(unittest.TestCase):\n @staticmethod\n def custom_callable(pcoll):\n return pcoll | '+1' >> FlatMap(lambda x: [x + 1])\n\n # Some of these tests designate a runner by name, others supply a runner.\n # This variation is just to verify that both means of runner specification\n # work and is not related to other aspects of the tests.\n\n class CustomTransform(PTransform):\n def expand(self, pcoll):\n return pcoll | '+1' >> FlatMap(lambda x: [x + 1])\n\n class Visitor(PipelineVisitor):\n def __init__(self, visited):\n self.visited = visited\n self.enter_composite = []\n self.leave_composite = []\n\n def visit_value(self, value, _):\n self.visited.append(value)\n\n def enter_composite_transform(self, transform_node):\n self.enter_composite.append(transform_node)\n\n def leave_composite_transform(self, transform_node):\n self.leave_composite.append(transform_node)\n\n def test_create(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'label1' >> Create([1, 2, 3])\n assert_that(pcoll, equal_to([1, 2, 3]))\n\n # Test if initial value is an iterator object.\n pcoll2 = pipeline | 'label2' >> Create(iter((4, 5, 6)))\n pcoll3 = pcoll2 | 'do' >> FlatMap(lambda x: [x + 10])\n assert_that(pcoll3, equal_to([14, 15, 16]), label='pcoll3')\n\n def test_flatmap_builtin(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'label1' >> Create([1, 2, 3])\n assert_that(pcoll, equal_to([1, 2, 3]))\n\n pcoll2 = pcoll | 'do' >> FlatMap(lambda x: [x + 10])\n assert_that(pcoll2, equal_to([11, 12, 13]), label='pcoll2')\n\n pcoll3 = pcoll2 | 'm1' >> Map(lambda x: [x, 12])\n assert_that(\n pcoll3, equal_to([[11, 12], [12, 12], [13, 12]]), label='pcoll3')\n\n pcoll4 = pcoll3 | 'do2' >> FlatMap(set)\n assert_that(pcoll4, equal_to([11, 12, 12, 12, 13]), label='pcoll4')\n\n def test_maptuple_builtin(self):\n with TestPipeline() as pipeline:\n pcoll = 
pipeline | Create([('e1', 'e2')])\n side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))\n side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))\n\n # A test function with a tuple input, an auxiliary parameter,\n # and some side inputs.\n fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (\n e1, e2, t, s1, s2)\n assert_that(\n pcoll | 'NoSides' >> beam.core.MapTuple(fn),\n equal_to([('e1', 'e2', MIN_TIMESTAMP, None, None)]),\n label='NoSidesCheck')\n assert_that(\n pcoll | 'StaticSides' >> beam.core.MapTuple(fn, 's1', 's2'),\n equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),\n label='StaticSidesCheck')\n assert_that(\n pcoll | 'DynamicSides' >> beam.core.MapTuple(fn, side1, side2),\n equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),\n label='DynamicSidesCheck')\n assert_that(\n pcoll | 'MixedSides' >> beam.core.MapTuple(fn, s2=side2),\n equal_to([('e1', 'e2', MIN_TIMESTAMP, None, 's2')]),\n label='MixedSidesCheck')\n\n def test_flatmaptuple_builtin(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | Create([('e1', 'e2')])\n side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))\n side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))\n\n # A test function with a tuple input, an auxiliary parameter,\n # and some side inputs.\n fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (\n e1, e2, t, s1, s2)\n assert_that(\n pcoll | 'NoSides' >> beam.core.FlatMapTuple(fn),\n equal_to(['e1', 'e2', MIN_TIMESTAMP, None, None]),\n label='NoSidesCheck')\n assert_that(\n pcoll | 'StaticSides' >> beam.core.FlatMapTuple(fn, 's1', 's2'),\n equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),\n label='StaticSidesCheck')\n assert_that(\n pcoll\n | 'DynamicSides' >> beam.core.FlatMapTuple(fn, side1, side2),\n equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),\n label='DynamicSidesCheck')\n assert_that(\n pcoll | 'MixedSides' >> beam.core.FlatMapTuple(fn, s2=side2),\n equal_to(['e1', 'e2', 
MIN_TIMESTAMP, None, 's2']),\n label='MixedSidesCheck')\n\n def test_create_singleton_pcollection(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'label' >> Create([[1, 2, 3]])\n assert_that(pcoll, equal_to([[1, 2, 3]]))\n\n def test_visit_entire_graph(self):\n pipeline = Pipeline()\n pcoll1 = pipeline | 'pcoll' >> beam.Impulse()\n pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])\n pcoll4 = pcoll2 | 'do3' >> FlatMap(lambda x: [x + 1])\n transform = PipelineTest.CustomTransform()\n pcoll5 = pcoll4 | transform\n\n visitor = PipelineTest.Visitor(visited=[])\n pipeline.visit(visitor)\n self.assertEqual({pcoll1, pcoll2, pcoll3, pcoll4, pcoll5},\n set(visitor.visited))\n self.assertEqual(set(visitor.enter_composite), set(visitor.leave_composite))\n self.assertEqual(2, len(visitor.enter_composite))\n self.assertEqual(visitor.enter_composite[1].transform, transform)\n self.assertEqual(visitor.leave_composite[0].transform, transform)\n\n def test_apply_custom_transform(self):\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'pcoll' >> Create([1, 2, 3])\n result = pcoll | PipelineTest.CustomTransform()\n assert_that(result, equal_to([2, 3, 4]))\n\n def test_reuse_custom_transform_instance(self):\n pipeline = Pipeline()\n pcoll1 = pipeline | 'pcoll1' >> Create([1, 2, 3])\n pcoll2 = pipeline | 'pcoll2' >> Create([4, 5, 6])\n transform = PipelineTest.CustomTransform()\n pcoll1 | transform\n with self.assertRaises(RuntimeError) as cm:\n pipeline.apply(transform, pcoll2)\n self.assertEqual(\n cm.exception.args[0],\n 'A transform with label \"CustomTransform\" already exists in the '\n 'pipeline. 
To apply a transform with a specified label write '\n 'pvalue | \"label\" >> transform')\n\n def test_reuse_cloned_custom_transform_instance(self):\n with TestPipeline() as pipeline:\n pcoll1 = pipeline | 'pc1' >> Create([1, 2, 3])\n pcoll2 = pipeline | 'pc2' >> Create([4, 5, 6])\n transform = PipelineTest.CustomTransform()\n result1 = pcoll1 | transform\n result2 = pcoll2 | 'new_label' >> transform\n assert_that(result1, equal_to([2, 3, 4]), label='r1')\n assert_that(result2, equal_to([5, 6, 7]), label='r2')\n\n def test_transform_no_super_init(self):\n class AddSuffix(PTransform):\n def __init__(self, suffix):\n # No call to super(...).__init__\n self.suffix = suffix\n\n def expand(self, pcoll):\n return pcoll | Map(lambda x: x + self.suffix)\n\n self.assertEqual(['a-x', 'b-x', 'c-x'],\n sorted(['a', 'b', 'c'] | 'AddSuffix' >> AddSuffix('-x')))\n\n @unittest.skip(\"Fails on some platforms with new urllib3.\")\n def test_memory_usage(self):\n try:\n import resource\n except ImportError:\n # Skip the test if resource module is not available (e.g. 
non-Unix os).\n self.skipTest('resource module not available.')\n if platform.mac_ver()[0]:\n # Skip the test on macos, depending on version it returns ru_maxrss in\n # different units.\n self.skipTest('ru_maxrss is not in standard units.')\n\n def get_memory_usage_in_bytes():\n return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * (2**10)\n\n def check_memory(value, memory_threshold):\n memory_usage = get_memory_usage_in_bytes()\n if memory_usage > memory_threshold:\n raise RuntimeError(\n 'High memory usage: %d > %d' % (memory_usage, memory_threshold))\n return value\n\n len_elements = 1000000\n num_elements = 10\n num_maps = 100\n\n # TODO(robertwb): reduce memory usage of FnApiRunner so that this test\n # passes.\n with TestPipeline(runner='BundleBasedDirectRunner') as pipeline:\n\n # Consumed memory should not be proportional to the number of maps.\n memory_threshold = (\n get_memory_usage_in_bytes() + (5 * len_elements * num_elements))\n\n # Plus small additional slack for memory fluctuations during the test.\n memory_threshold += 10 * (2**20)\n\n biglist = pipeline | 'oom:create' >> Create(\n ['x' * len_elements] * num_elements)\n for i in range(num_maps):\n biglist = biglist | ('oom:addone-%d' % i) >> Map(lambda x: x + 'y')\n result = biglist | 'oom:check' >> Map(check_memory, memory_threshold)\n assert_that(\n result,\n equal_to(['x' * len_elements + 'y' * num_maps] * num_elements))\n\n def test_aggregator_empty_input(self):\n actual = [] | CombineGlobally(max).without_defaults()\n self.assertEqual(actual, [])\n\n def test_pipeline_as_context(self):\n def raise_exception(exn):\n raise exn\n\n with self.assertRaises(ValueError):\n with Pipeline() as p:\n # pylint: disable=expression-not-assigned\n p | Create([ValueError('msg')]) | Map(raise_exception)\n\n def test_ptransform_overrides(self):\n class MyParDoOverride(PTransformOverride):\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, DoubleParDo)\n\n def 
get_replacement_transform_for_applied_ptransform(\n self, applied_ptransform):\n ptransform = applied_ptransform.transform\n if isinstance(ptransform, DoubleParDo):\n return TripleParDo()\n raise ValueError('Unsupported type of transform: %r' % ptransform)\n\n p = Pipeline()\n pcoll = p | beam.Create([1, 2, 3]) | 'Multiply' >> DoubleParDo()\n assert_that(pcoll, equal_to([3, 6, 9]))\n\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_type_hints(self):\n class NoTypeHintOverride(PTransformOverride):\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, DoubleParDo)\n\n def get_replacement_transform_for_applied_ptransform(\n self, applied_ptransform):\n return ToStringParDo()\n\n class WithTypeHintOverride(PTransformOverride):\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, DoubleParDo)\n\n def get_replacement_transform_for_applied_ptransform(\n self, applied_ptransform):\n return ToStringParDo().with_input_types(int).with_output_types(str)\n\n for override, expected_type in [(NoTypeHintOverride(), int),\n (WithTypeHintOverride(), str)]:\n p = TestPipeline()\n pcoll = (\n p\n | beam.Create([1, 2, 3])\n | 'Operate' >> DoubleParDo()\n | 'NoOp' >> beam.Map(lambda x: x))\n\n p.replace_all([override])\n self.assertEqual(pcoll.producer.inputs[0].element_type, expected_type)\n\n def test_ptransform_override_multiple_inputs(self):\n class MyParDoOverride(PTransformOverride):\n def matches(self, applied_ptransform):\n return isinstance(applied_ptransform.transform, FlattenAndDouble)\n\n def get_replacement_transform(self, applied_ptransform):\n return FlattenAndTriple()\n\n p = Pipeline()\n pcoll1 = p | 'pc1' >> beam.Create([1, 2, 3])\n pcoll2 = p | 'pc2' >> beam.Create([4, 5, 6])\n pcoll3 = (pcoll1, pcoll2) | 'FlattenAndMultiply' >> FlattenAndDouble()\n assert_that(pcoll3, equal_to([3, 6, 9, 12, 15, 18]))\n\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def 
test_ptransform_override_side_inputs(self):\n class MyParDoOverride(PTransformOverride):\n def matches(self, applied_ptransform):\n return (\n isinstance(applied_ptransform.transform, ParDo) and\n isinstance(applied_ptransform.transform.fn, AddWithProductDoFn))\n\n def get_replacement_transform(self, transform):\n return AddThenMultiply()\n\n p = Pipeline()\n pcoll1 = p | 'pc1' >> beam.Create([2])\n pcoll2 = p | 'pc2' >> beam.Create([3])\n pcoll3 = p | 'pc3' >> beam.Create([4, 5, 6])\n result = pcoll3 | 'Operate' >> beam.ParDo(\n AddWithProductDoFn(), AsSingleton(pcoll1), AsSingleton(pcoll2))\n assert_that(result, equal_to([18, 21, 24]))\n\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_replacement_inputs(self):\n class MyParDoOverride(PTransformOverride):\n def matches(self, applied_ptransform):\n return (\n isinstance(applied_ptransform.transform, ParDo) and\n isinstance(applied_ptransform.transform.fn, AddWithProductDoFn))\n\n def get_replacement_transform(self, transform):\n return AddThenMultiply()\n\n def get_replacement_inputs(self, applied_ptransform):\n assert len(applied_ptransform.inputs) == 1\n assert len(applied_ptransform.side_inputs) == 2\n # Swap the order of the two side inputs\n return (\n applied_ptransform.inputs[0],\n applied_ptransform.side_inputs[1].pvalue,\n applied_ptransform.side_inputs[0].pvalue)\n\n p = Pipeline()\n pcoll1 = p | 'pc1' >> beam.Create([2])\n pcoll2 = p | 'pc2' >> beam.Create([3])\n pcoll3 = p | 'pc3' >> beam.Create([4, 5, 6])\n result = pcoll3 | 'Operate' >> beam.ParDo(\n AddWithProductDoFn(), AsSingleton(pcoll1), AsSingleton(pcoll2))\n assert_that(result, equal_to([14, 16, 18]))\n\n p.replace_all([MyParDoOverride()])\n p.run()\n\n def test_ptransform_override_multiple_outputs(self):\n class MultiOutputComposite(PTransform):\n def __init__(self):\n self.output_tags = set()\n\n def expand(self, pcoll):\n def mux_input(x):\n x = x * 2\n if isinstance(x, int):\n yield TaggedOutput('numbers', 
x)\n else:\n yield TaggedOutput('letters', x)\n\n multi = pcoll | 'MyReplacement' >> beam.ParDo(mux_input).with_outputs()\n letters = multi.letters | 'LettersComposite' >> beam.Map(\n lambda x: x * 3)\n numbers = multi.numbers | 'NumbersComposite' >> beam.Map(\n lambda x: x * 5)\n\n return {\n 'letters': letters,\n 'numbers': numbers,\n }\n\n class MultiOutputOverride(PTransformOverride):\n def matches(self, applied_ptransform):\n return applied_ptransform.full_label == 'MyMultiOutput'\n\n def get_replacement_transform_for_applied_ptransform(\n self, applied_ptransform):\n return MultiOutputComposite()\n\n def mux_input(x):\n if isinstance(x, int):\n yield TaggedOutput('numbers', x)\n else:\n yield TaggedOutput('letters', x)\n\n with TestPipeline() as p:\n multi = (\n p\n | beam.Create([1, 2, 3, 'a', 'b', 'c'])\n | 'MyMultiOutput' >> beam.ParDo(mux_input).with_outputs())\n letters = multi.letters | 'MyLetters' >> beam.Map(lambda x: x)\n numbers = multi.numbers | 'MyNumbers' >> beam.Map(lambda x: x)\n\n # Assert that the PCollection replacement worked correctly and that\n # elements are flowing through. The replacement transform first\n # multiples by 2 then the leaf nodes inside the composite multiply by\n # an additional 3 and 5. 
Use prime numbers to ensure that each\n # transform is getting executed once.\n assert_that(\n letters,\n equal_to(['a' * 2 * 3, 'b' * 2 * 3, 'c' * 2 * 3]),\n label='assert letters')\n assert_that(\n numbers,\n equal_to([1 * 2 * 5, 2 * 2 * 5, 3 * 2 * 5]),\n label='assert numbers')\n\n # Do the replacement and run the element assertions.\n p.replace_all([MultiOutputOverride()])\n\n # The following checks the graph to make sure the replacement occurred.\n visitor = PipelineTest.Visitor(visited=[])\n p.visit(visitor)\n pcollections = visitor.visited\n composites = visitor.enter_composite\n\n # Assert the replacement is in the composite list and retrieve the\n # AppliedPTransform.\n self.assertIn(\n MultiOutputComposite, [t.transform.__class__ for t in composites])\n multi_output_composite = list(\n filter(\n lambda t: t.transform.__class__ == MultiOutputComposite,\n composites))[0]\n\n # Assert that all of the replacement PCollections are in the graph.\n for output in multi_output_composite.outputs.values():\n self.assertIn(output, pcollections)\n\n # Assert that all of the \"old\"/replaced PCollections are not in the graph.\n self.assertNotIn(multi[None], visitor.visited)\n self.assertNotIn(multi.letters, visitor.visited)\n self.assertNotIn(multi.numbers, visitor.visited)\n\n def test_kv_ptransform_honor_type_hints(self):\n\n # The return type of this DoFn cannot be inferred by the default\n # Beam type inference\n class StatefulDoFn(DoFn):\n BYTES_STATE = BagStateSpec('bytes', BytesCoder())\n\n def return_recursive(self, count):\n if count == 0:\n return [\"some string\"]\n else:\n self.return_recursive(count - 1)\n\n def process(self, element, counter=DoFn.StateParam(BYTES_STATE)):\n return self.return_recursive(1)\n\n with TestPipeline() as p:\n pcoll = (\n p\n | beam.Create([(1, 1), (2, 2), (3, 3)])\n | beam.GroupByKey()\n | beam.ParDo(StatefulDoFn()))\n self.assertEqual(pcoll.element_type, typehints.Any)\n\n with TestPipeline() as p:\n pcoll = (\n p\n | 
beam.Create([(1, 1), (2, 2), (3, 3)])\n | beam.GroupByKey()\n | beam.ParDo(StatefulDoFn()).with_output_types(str))\n self.assertEqual(pcoll.element_type, str)\n\n def test_track_pcoll_unbounded(self):\n pipeline = TestPipeline()\n pcoll1 = pipeline | 'read' >> Read(FakeUnboundedSource())\n pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])\n self.assertIs(pcoll1.is_bounded, False)\n self.assertIs(pcoll2.is_bounded, False)\n self.assertIs(pcoll3.is_bounded, False)\n\n def test_track_pcoll_bounded(self):\n pipeline = TestPipeline()\n pcoll1 = pipeline | 'label1' >> Create([1, 2, 3])\n pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])\n pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])\n self.assertIs(pcoll1.is_bounded, True)\n self.assertIs(pcoll2.is_bounded, True)\n self.assertIs(pcoll3.is_bounded, True)\n\n def test_track_pcoll_bounded_flatten(self):\n pipeline = TestPipeline()\n pcoll1_a = pipeline | 'label_a' >> Create([1, 2, 3])\n pcoll2_a = pcoll1_a | 'do_a' >> FlatMap(lambda x: [x + 1])\n\n pcoll1_b = pipeline | 'label_b' >> Create([1, 2, 3])\n pcoll2_b = pcoll1_b | 'do_b' >> FlatMap(lambda x: [x + 1])\n\n merged = (pcoll2_a, pcoll2_b) | beam.Flatten()\n\n self.assertIs(pcoll1_a.is_bounded, True)\n self.assertIs(pcoll2_a.is_bounded, True)\n self.assertIs(pcoll1_b.is_bounded, True)\n self.assertIs(pcoll2_b.is_bounded, True)\n self.assertIs(merged.is_bounded, True)\n\n def test_track_pcoll_unbounded_flatten(self):\n pipeline = TestPipeline()\n pcoll1_bounded = pipeline | 'label1' >> Create([1, 2, 3])\n pcoll2_bounded = pcoll1_bounded | 'do1' >> FlatMap(lambda x: [x + 1])\n\n pcoll1_unbounded = pipeline | 'read' >> Read(FakeUnboundedSource())\n pcoll2_unbounded = pcoll1_unbounded | 'do2' >> FlatMap(lambda x: [x + 1])\n\n merged = (pcoll2_bounded, pcoll2_unbounded) | beam.Flatten()\n\n self.assertIs(pcoll1_bounded.is_bounded, True)\n self.assertIs(pcoll2_bounded.is_bounded, True)\n 
self.assertIs(pcoll1_unbounded.is_bounded, False)\n self.assertIs(pcoll2_unbounded.is_bounded, False)\n self.assertIs(merged.is_bounded, False)\n\n def test_incompatible_submission_and_runtime_envs_fail_pipeline(self):\n with mock.patch(\n 'apache_beam.transforms.environments.sdk_base_version_capability'\n ) as base_version:\n base_version.side_effect = [\n f\"beam:version:sdk_base:apache/beam_python3.5_sdk:2.{i}.0\"\n for i in range(100)\n ]\n with self.assertRaisesRegex(\n RuntimeError,\n 'Pipeline construction environment and pipeline runtime '\n 'environment are not compatible.'):\n with TestPipeline() as p:\n _ = p | Create([None])\n\n\nclass DoFnTest(unittest.TestCase):\n def test_element(self):\n class TestDoFn(DoFn):\n def process(self, element):\n yield element + 10\n\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())\n assert_that(pcoll, equal_to([11, 12]))\n\n def test_side_input_no_tag(self):\n class TestDoFn(DoFn):\n def process(self, element, prefix, suffix):\n return ['%s-%s-%s' % (prefix, element, suffix)]\n\n with TestPipeline() as pipeline:\n words_list = ['aa', 'bb', 'cc']\n words = pipeline | 'SomeWords' >> Create(words_list)\n prefix = 'zyx'\n suffix = pipeline | 'SomeString' >> Create(['xyz']) # side in\n result = words | 'DecorateWordsDoFnNoTag' >> ParDo(\n TestDoFn(), prefix, suffix=AsSingleton(suffix))\n assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))\n\n def test_side_input_tagged(self):\n class TestDoFn(DoFn):\n def process(self, element, prefix, suffix=DoFn.SideInputParam):\n return ['%s-%s-%s' % (prefix, element, suffix)]\n\n with TestPipeline() as pipeline:\n words_list = ['aa', 'bb', 'cc']\n words = pipeline | 'SomeWords' >> Create(words_list)\n prefix = 'zyx'\n suffix = pipeline | 'SomeString' >> Create(['xyz']) # side in\n result = words | 'DecorateWordsDoFnNoTag' >> ParDo(\n TestDoFn(), prefix, suffix=AsSingleton(suffix))\n assert_that(result, 
equal_to(['zyx-%s-xyz' % x for x in words_list]))\n\n @pytest.mark.it_validatesrunner\n def test_element_param(self):\n pipeline = TestPipeline()\n input = [1, 2]\n pcoll = (\n pipeline\n | 'Create' >> Create(input)\n | 'Ele param' >> Map(lambda element=DoFn.ElementParam: element))\n assert_that(pcoll, equal_to(input))\n pipeline.run()\n\n @pytest.mark.it_validatesrunner\n def test_key_param(self):\n pipeline = TestPipeline()\n pcoll = (\n pipeline\n | 'Create' >> Create([('a', 1), ('b', 2)])\n | 'Key param' >> Map(lambda _, key=DoFn.KeyParam: key))\n assert_that(pcoll, equal_to(['a', 'b']))\n pipeline.run()\n\n def test_window_param(self):\n class TestDoFn(DoFn):\n def process(self, element, window=DoFn.WindowParam):\n yield (element, (float(window.start), float(window.end)))\n\n with TestPipeline() as pipeline:\n pcoll = (\n pipeline\n | Create([1, 7])\n | Map(lambda x: TimestampedValue(x, x))\n | WindowInto(windowfn=SlidingWindows(10, 5))\n | ParDo(TestDoFn()))\n assert_that(\n pcoll,\n equal_to([(1, (-5, 5)), (1, (0, 10)), (7, (0, 10)), (7, (5, 15))]))\n pcoll2 = pcoll | 'Again' >> ParDo(TestDoFn())\n assert_that(\n pcoll2,\n equal_to([((1, (-5, 5)), (-5, 5)), ((1, (0, 10)), (0, 10)),\n ((7, (0, 10)), (0, 10)), ((7, (5, 15)), (5, 15))]),\n label='doubled windows')\n\n def test_timestamp_param(self):\n class TestDoFn(DoFn):\n def process(self, element, timestamp=DoFn.TimestampParam):\n yield timestamp\n\n with TestPipeline() as pipeline:\n pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())\n assert_that(pcoll, equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))\n\n def test_timestamp_param_map(self):\n with TestPipeline() as p:\n assert_that(\n p | Create([1, 2]) | beam.Map(lambda _, t=DoFn.TimestampParam: t),\n equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))\n\n def test_pane_info_param(self):\n with TestPipeline() as p:\n pc = p | Create([(None, None)])\n assert_that(\n pc | beam.Map(lambda _, p=DoFn.PaneInfoParam: p),\n 
equal_to([windowed_value.PANE_INFO_UNKNOWN]),\n label='CheckUngrouped')\n assert_that(\n pc | beam.GroupByKey() | beam.Map(lambda _, p=DoFn.PaneInfoParam: p),\n equal_to([\n windowed_value.PaneInfo(\n is_first=True,\n is_last=True,\n timing=windowed_value.PaneInfoTiming.ON_TIME,\n index=0,\n nonspeculative_index=0)\n ]),\n label='CheckGrouped')\n\n def test_incomparable_default(self):\n class IncomparableType(object):\n def __eq__(self, other):\n raise RuntimeError()\n\n def __ne__(self, other):\n raise RuntimeError()\n\n def __hash__(self):\n raise RuntimeError()\n\n # Ensure that we don't use default values in a context where they must be\n # comparable (see BEAM-8301).\n with TestPipeline() as pipeline:\n pcoll = (\n pipeline\n | beam.Create([None])\n | Map(lambda e, x=IncomparableType(): (e, type(x).__name__)))\n assert_that(pcoll, equal_to([(None, 'IncomparableType')]))\n\n\nclass Bacon(PipelineOptions):\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_argument('--slices', type=int)\n\n\nclass Eggs(PipelineOptions):\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_argument('--style', default='scrambled')\n\n\nclass Breakfast(Bacon, Eggs):\n pass\n\n\nclass PipelineOptionsTest(unittest.TestCase):\n def test_flag_parsing(self):\n options = Breakfast(['--slices=3', '--style=sunny side up', '--ignored'])\n self.assertEqual(3, options.slices)\n self.assertEqual('sunny side up', options.style)\n\n def test_keyword_parsing(self):\n options = Breakfast(['--slices=3', '--style=sunny side up', '--ignored'],\n slices=10)\n self.assertEqual(10, options.slices)\n self.assertEqual('sunny side up', options.style)\n\n def test_attribute_setting(self):\n options = Breakfast(slices=10)\n self.assertEqual(10, options.slices)\n options.slices = 20\n self.assertEqual(20, options.slices)\n\n def test_view_as(self):\n generic_options = PipelineOptions(['--slices=3'])\n self.assertEqual(3, generic_options.view_as(Bacon).slices)\n 
self.assertEqual(3, generic_options.view_as(Breakfast).slices)\n\n generic_options.view_as(Breakfast).slices = 10\n self.assertEqual(10, generic_options.view_as(Bacon).slices)\n\n with self.assertRaises(AttributeError):\n generic_options.slices # pylint: disable=pointless-statement\n\n with self.assertRaises(AttributeError):\n generic_options.view_as(Eggs).slices # pylint: disable=expression-not-assigned\n\n def test_defaults(self):\n options = Breakfast(['--slices=3'])\n self.assertEqual(3, options.slices)\n self.assertEqual('scrambled', options.style)\n\n def test_dir(self):\n options = Breakfast()\n self.assertEqual({\n 'from_dictionary',\n 'get_all_options',\n 'slices',\n 'style',\n 'view_as',\n 'display_data'\n },\n {\n attr\n for attr in dir(options)\n if not attr.startswith('_') and attr != 'next'\n })\n self.assertEqual({\n 'from_dictionary',\n 'get_all_options',\n 'style',\n 'view_as',\n 'display_data'\n },\n {\n attr\n for attr in dir(options.view_as(Eggs))\n if not attr.startswith('_') and attr != 'next'\n })\n\n\nclass RunnerApiTest(unittest.TestCase):\n def test_parent_pointer(self):\n class MyPTransform(beam.PTransform):\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n\n p = beam.Pipeline()\n p | MyPTransform() # pylint: disable=expression-not-assigned\n p = Pipeline.from_runner_api(\n Pipeline.to_runner_api(p, use_fake_coders=True), None, None)\n self.assertIsNotNone(p.transforms_stack[0].parts[0].parent)\n self.assertEqual(\n p.transforms_stack[0].parts[0].parent, p.transforms_stack[0])\n\n def test_requirements(self):\n p = beam.Pipeline()\n _ = (\n p | beam.Create([])\n | beam.ParDo(lambda x, finalize=beam.DoFn.BundleFinalizerParam: None))\n proto = p.to_runner_api()\n self.assertTrue(\n common_urns.requirements.REQUIRES_BUNDLE_FINALIZATION.urn,\n proto.requirements)\n\n def test_annotations(self):\n some_proto = BytesCoder().to_runner_api(None)\n\n class EmptyTransform(beam.PTransform):\n def expand(self, pcoll):\n return 
pcoll\n\n def annotations(self):\n return {'foo': 'some_string'}\n\n class NonEmptyTransform(beam.PTransform):\n def expand(self, pcoll):\n return pcoll | beam.Map(lambda x: x)\n\n def annotations(self):\n return {\n 'foo': b'some_bytes',\n 'proto': some_proto,\n }\n\n p = beam.Pipeline()\n _ = p | beam.Create([]) | EmptyTransform() | NonEmptyTransform()\n proto = p.to_runner_api()\n\n seen = 0\n for transform in proto.components.transforms.values():\n if transform.unique_name == 'EmptyTransform':\n seen += 1\n self.assertEqual(transform.annotations['foo'], b'some_string')\n elif transform.unique_name == 'NonEmptyTransform':\n seen += 1\n self.assertEqual(transform.annotations['foo'], b'some_bytes')\n self.assertEqual(\n transform.annotations['proto'], some_proto.SerializeToString())\n self.assertEqual(seen, 2)\n\n def test_transform_ids(self):\n class MyPTransform(beam.PTransform):\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n\n p = beam.Pipeline()\n p | MyPTransform() # pylint: disable=expression-not-assigned\n runner_api_proto = Pipeline.to_runner_api(p)\n\n for transform_id in runner_api_proto.components.transforms:\n self.assertRegex(transform_id, r'[a-zA-Z0-9-_]+')\n\n def test_input_names(self):\n class MyPTransform(beam.PTransform):\n def expand(self, pcolls):\n return pcolls.values() | beam.Flatten()\n\n p = beam.Pipeline()\n input_names = set('ABC')\n inputs = {x: p | x >> beam.Create([x]) for x in input_names}\n inputs | MyPTransform() # pylint: disable=expression-not-assigned\n runner_api_proto = Pipeline.to_runner_api(p)\n\n for transform_proto in runner_api_proto.components.transforms.values():\n if transform_proto.unique_name == 'MyPTransform':\n self.assertEqual(set(transform_proto.inputs.keys()), input_names)\n break\n else:\n self.fail('Unable to find transform.')\n\n def test_display_data(self):\n class MyParentTransform(beam.PTransform):\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n\n def 
display_data(self): # type: () -> dict\n parent_dd = super().display_data()\n parent_dd['p_dd_string'] = DisplayDataItem(\n 'p_dd_string_value', label='p_dd_string_label')\n parent_dd['p_dd_string_2'] = DisplayDataItem('p_dd_string_value_2')\n parent_dd['p_dd_bool'] = DisplayDataItem(True, label='p_dd_bool_label')\n parent_dd['p_dd_int'] = DisplayDataItem(1, label='p_dd_int_label')\n return parent_dd\n\n class MyPTransform(MyParentTransform):\n def expand(self, p):\n self.p = p\n return p | beam.Create([None])\n\n def display_data(self): # type: () -> dict\n parent_dd = super().display_data()\n parent_dd['dd_string'] = DisplayDataItem(\n 'dd_string_value', label='dd_string_label')\n parent_dd['dd_string_2'] = DisplayDataItem('dd_string_value_2')\n parent_dd['dd_bool'] = DisplayDataItem(False, label='dd_bool_label')\n parent_dd['dd_double'] = DisplayDataItem(1.1, label='dd_double_label')\n return parent_dd\n\n p = beam.Pipeline()\n p | MyPTransform() # pylint: disable=expression-not-assigned\n\n proto_pipeline = Pipeline.to_runner_api(p, use_fake_coders=True)\n my_transform, = [\n transform\n for transform in proto_pipeline.components.transforms.values()\n if transform.unique_name == 'MyPTransform'\n ]\n self.assertIsNotNone(my_transform)\n self.assertListEqual(\n list(my_transform.display_data),\n [\n beam_runner_api_pb2.DisplayData(\n urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(\n label='p_dd_string_label',\n key='p_dd_string',\n namespace='apache_beam.pipeline_test.MyPTransform',\n string_value='p_dd_string_value').SerializeToString()),\n beam_runner_api_pb2.DisplayData(\n urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(\n label='p_dd_string_2',\n key='p_dd_string_2',\n namespace='apache_beam.pipeline_test.MyPTransform',\n string_value='p_dd_string_value_2').SerializeToString()),\n beam_runner_api_pb2.DisplayData(\n 
urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(\n label='p_dd_bool_label',\n key='p_dd_bool',\n namespace='apache_beam.pipeline_test.MyPTransform',\n bool_value=True).SerializeToString()),\n beam_runner_api_pb2.DisplayData(\n urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(\n label='p_dd_int_label',\n key='p_dd_int',\n namespace='apache_beam.pipeline_test.MyPTransform',\n int_value=1).SerializeToString()),\n beam_runner_api_pb2.DisplayData(\n urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(\n label='dd_string_label',\n key='dd_string',\n namespace='apache_beam.pipeline_test.MyPTransform',\n string_value='dd_string_value').SerializeToString()),\n beam_runner_api_pb2.DisplayData(\n urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(\n label='dd_string_2',\n key='dd_string_2',\n namespace='apache_beam.pipeline_test.MyPTransform',\n string_value='dd_string_value_2').SerializeToString()),\n beam_runner_api_pb2.DisplayData(\n urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(\n label='dd_bool_label',\n key='dd_bool',\n namespace='apache_beam.pipeline_test.MyPTransform',\n bool_value=False).SerializeToString()),\n beam_runner_api_pb2.DisplayData(\n urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,\n payload=beam_runner_api_pb2.LabelledPayload(\n label='dd_double_label',\n key='dd_double',\n namespace='apache_beam.pipeline_test.MyPTransform',\n double_value=1.1).SerializeToString()),\n ])\n\n def test_runner_api_roundtrip_preserves_resource_hints(self):\n p = beam.Pipeline()\n _ = (\n p | beam.Create([1, 2])\n | beam.Map(lambda x: x + 1).with_resource_hints(accelerator='gpu'))\n\n self.assertEqual(\n p.transforms_stack[0].parts[1].transform.get_resource_hints(),\n 
{common_urns.resource_hints.ACCELERATOR.urn: b'gpu'})\n\n for _ in range(3):\n # Verify that DEFAULT environments are recreated during multiple RunnerAPI\n # translation and hints don't get lost.\n p = Pipeline.from_runner_api(Pipeline.to_runner_api(p), None, None)\n self.assertEqual(\n p.transforms_stack[0].parts[1].transform.get_resource_hints(),\n {common_urns.resource_hints.ACCELERATOR.urn: b'gpu'})\n\n def test_hints_on_composite_transforms_are_propagated_to_subtransforms(self):\n class FooHint(ResourceHint):\n urn = 'foo_urn'\n\n class BarHint(ResourceHint):\n urn = 'bar_urn'\n\n class BazHint(ResourceHint):\n urn = 'baz_urn'\n\n class QuxHint(ResourceHint):\n urn = 'qux_urn'\n\n class UseMaxValueHint(ResourceHint):\n urn = 'use_max_value_urn'\n\n @classmethod\n def get_merged_value(\n cls, outer_value, inner_value): # type: (bytes, bytes) -> bytes\n return ResourceHint._use_max(outer_value, inner_value)\n\n ResourceHint.register_resource_hint('foo_hint', FooHint)\n ResourceHint.register_resource_hint('bar_hint', BarHint)\n ResourceHint.register_resource_hint('baz_hint', BazHint)\n ResourceHint.register_resource_hint('qux_hint', QuxHint)\n ResourceHint.register_resource_hint('use_max_value_hint', UseMaxValueHint)\n\n @beam.ptransform_fn\n def SubTransform(pcoll):\n return pcoll | beam.Map(lambda x: x + 1).with_resource_hints(\n foo_hint='set_on_subtransform', use_max_value_hint='10')\n\n @beam.ptransform_fn\n def CompositeTransform(pcoll):\n return pcoll | beam.Map(lambda x: x * 2) | SubTransform()\n\n p = beam.Pipeline()\n _ = (\n p | beam.Create([1, 2])\n | CompositeTransform().with_resource_hints(\n foo_hint='should_be_overriden_by_subtransform',\n bar_hint='set_on_composite',\n baz_hint='set_on_composite',\n use_max_value_hint='100'))\n options = PortableOptions([\n '--resource_hint=baz_hint=should_be_overriden_by_composite',\n '--resource_hint=qux_hint=set_via_options',\n '--environment_type=PROCESS',\n '--environment_option=process_command=foo',\n 
'--sdk_location=container',\n ])\n environment = ProcessEnvironment.from_options(options)\n proto = Pipeline.to_runner_api(p, default_environment=environment)\n\n for t in proto.components.transforms.values():\n if \"CompositeTransform/SubTransform/Map\" in t.unique_name:\n environment = proto.components.environments.get(t.environment_id)\n self.assertEqual(\n environment.resource_hints.get('foo_urn'), b'set_on_subtransform')\n self.assertEqual(\n environment.resource_hints.get('bar_urn'), b'set_on_composite')\n self.assertEqual(\n environment.resource_hints.get('baz_urn'), b'set_on_composite')\n self.assertEqual(\n environment.resource_hints.get('qux_urn'), b'set_via_options')\n self.assertEqual(\n environment.resource_hints.get('use_max_value_urn'), b'100')\n found = True\n assert found\n\n def test_environments_with_same_resource_hints_are_reused(self):\n class HintX(ResourceHint):\n urn = 'X_urn'\n\n class HintY(ResourceHint):\n urn = 'Y_urn'\n\n class HintIsOdd(ResourceHint):\n urn = 'IsOdd_urn'\n\n ResourceHint.register_resource_hint('X', HintX)\n ResourceHint.register_resource_hint('Y', HintY)\n ResourceHint.register_resource_hint('IsOdd', HintIsOdd)\n\n p = beam.Pipeline()\n num_iter = 4\n for i in range(num_iter):\n _ = (\n p\n | f'NoHintCreate_{i}' >> beam.Create([1, 2])\n | f'NoHint_{i}' >> beam.Map(lambda x: x + 1))\n _ = (\n p\n | f'XCreate_{i}' >> beam.Create([1, 2])\n |\n f'HintX_{i}' >> beam.Map(lambda x: x + 1).with_resource_hints(X='X'))\n _ = (\n p\n | f'XYCreate_{i}' >> beam.Create([1, 2])\n | f'HintXY_{i}' >> beam.Map(lambda x: x + 1).with_resource_hints(\n X='X', Y='Y'))\n _ = (\n p\n | f'IsOddCreate_{i}' >> beam.Create([1, 2])\n | f'IsOdd_{i}' >>\n beam.Map(lambda x: x + 1).with_resource_hints(IsOdd=str(i % 2 != 0)))\n\n proto = Pipeline.to_runner_api(p)\n count_x = count_xy = count_is_odd = count_no_hints = 0\n env_ids = set()\n for _, t in proto.components.transforms.items():\n env = proto.components.environments[t.environment_id]\n if 
t.unique_name.startswith('HintX_'):\n count_x += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {'X_urn': b'X'})\n\n if t.unique_name.startswith('HintXY_'):\n count_xy += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {'X_urn': b'X', 'Y_urn': b'Y'})\n\n if t.unique_name.startswith('NoHint_'):\n count_no_hints += 1\n env_ids.add(t.environment_id)\n self.assertEqual(env.resource_hints, {})\n\n if t.unique_name.startswith('IsOdd_'):\n count_is_odd += 1\n env_ids.add(t.environment_id)\n self.assertTrue(\n env.resource_hints == {'IsOdd_urn': b'True'} or\n env.resource_hints == {'IsOdd_urn': b'False'})\n assert count_x == count_is_odd == count_xy == count_no_hints == num_iter\n assert num_iter > 1\n\n self.assertEqual(len(env_ids), 5)\n\n def test_multiple_application_of_the_same_transform_set_different_hints(self):\n class FooHint(ResourceHint):\n urn = 'foo_urn'\n\n class UseMaxValueHint(ResourceHint):\n urn = 'use_max_value_urn'\n\n @classmethod\n def get_merged_value(\n cls, outer_value, inner_value): # type: (bytes, bytes) -> bytes\n return ResourceHint._use_max(outer_value, inner_value)\n\n ResourceHint.register_resource_hint('foo_hint', FooHint)\n ResourceHint.register_resource_hint('use_max_value_hint', UseMaxValueHint)\n\n @beam.ptransform_fn\n def SubTransform(pcoll):\n return pcoll | beam.Map(lambda x: x + 1)\n\n @beam.ptransform_fn\n def CompositeTransform(pcoll):\n sub = SubTransform()\n return (\n pcoll\n | 'first' >> sub.with_resource_hints(foo_hint='first_application')\n | 'second' >> sub.with_resource_hints(foo_hint='second_application'))\n\n p = beam.Pipeline()\n _ = (p | beam.Create([1, 2]) | CompositeTransform())\n proto = Pipeline.to_runner_api(p)\n count = 0\n for t in proto.components.transforms.values():\n if \"CompositeTransform/first/Map\" in t.unique_name:\n environment = proto.components.environments.get(t.environment_id)\n self.assertEqual(\n b'first_application', 
environment.resource_hints.get('foo_urn'))\n count += 1\n if \"CompositeTransform/second/Map\" in t.unique_name:\n environment = proto.components.environments.get(t.environment_id)\n self.assertEqual(\n b'second_application', environment.resource_hints.get('foo_urn'))\n count += 1\n assert count == 2\n\n def test_environments_are_deduplicated(self):\n def file_artifact(path, hash, staged_name):\n return beam_runner_api_pb2.ArtifactInformation(\n type_urn=common_urns.artifact_types.FILE.urn,\n type_payload=beam_runner_api_pb2.ArtifactFilePayload(\n path=path, sha256=hash).SerializeToString(),\n role_urn=common_urns.artifact_roles.STAGING_TO.urn,\n role_payload=beam_runner_api_pb2.ArtifactStagingToRolePayload(\n staged_name=staged_name).SerializeToString(),\n )\n\n proto = beam_runner_api_pb2.Pipeline(\n components=beam_runner_api_pb2.Components(\n transforms={\n f'transform{ix}': beam_runner_api_pb2.PTransform(\n environment_id=f'e{ix}')\n for ix in range(8)\n },\n environments={\n # Same hash and destination.\n 'e1': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a1', 'x', 'dest')]),\n 'e2': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a2', 'x', 'dest')]),\n # Different hash.\n 'e3': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a3', 'y', 'dest')]),\n # Different destination.\n 'e4': beam_runner_api_pb2.Environment(\n dependencies=[file_artifact('a4', 'y', 'dest2')]),\n # Multiple files with same hash and destinations.\n 'e5': beam_runner_api_pb2.Environment(\n dependencies=[\n file_artifact('a1', 'x', 'dest'),\n file_artifact('b1', 'xb', 'destB')\n ]),\n 'e6': beam_runner_api_pb2.Environment(\n dependencies=[\n file_artifact('a2', 'x', 'dest'),\n file_artifact('b2', 'xb', 'destB')\n ]),\n # Overlapping, but not identical, files.\n 'e7': beam_runner_api_pb2.Environment(\n dependencies=[\n file_artifact('a1', 'x', 'dest'),\n file_artifact('b2', 'y', 'destB')\n ]),\n # Same files as first, but differing other 
properties.\n 'e0': beam_runner_api_pb2.Environment(\n resource_hints={'hint': b'value'},\n dependencies=[file_artifact('a1', 'x', 'dest')]),\n }))\n Pipeline.merge_compatible_environments(proto)\n\n # These environments are equivalent.\n self.assertEqual(\n proto.components.transforms['transform1'].environment_id,\n proto.components.transforms['transform2'].environment_id)\n\n self.assertEqual(\n proto.components.transforms['transform5'].environment_id,\n proto.components.transforms['transform6'].environment_id)\n\n # These are not.\n self.assertNotEqual(\n proto.components.transforms['transform1'].environment_id,\n proto.components.transforms['transform3'].environment_id)\n self.assertNotEqual(\n proto.components.transforms['transform4'].environment_id,\n proto.components.transforms['transform3'].environment_id)\n self.assertNotEqual(\n proto.components.transforms['transform6'].environment_id,\n proto.components.transforms['transform7'].environment_id)\n self.assertNotEqual(\n proto.components.transforms['transform1'].environment_id,\n proto.components.transforms['transform0'].environment_id)\n\n self.assertEqual(len(proto.components.environments), 6)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
37,
60,
63,
68,
85
]
}
|
[
37,
60,
63,
68,
85
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(12):
aux = []
for j in range(12):
aux.append(float(input()))
m.append(aux)
aux = []
<|reserved_special_token_0|>
for i in range(12):
soma += m[i][c]
<|reserved_special_token_0|>
print('%.1f' % resultado)
<|reserved_special_token_1|>
c = int(input())
t = input()
m = []
for i in range(12):
aux = []
for j in range(12):
aux.append(float(input()))
m.append(aux)
aux = []
soma = 0
for i in range(12):
soma += m[i][c]
resultado = soma / (t == 'S' and 1 or 12)
print('%.1f' % resultado)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
c = int(input())
t = input()
m = []
for i in range(12):
aux = []
for j in range(12):
aux.append(float(input()))
m.append(aux)
aux = []
soma = 0
for i in range(12):
soma += m[i][c]
resultado = soma / (t == 'S' and 1 or 12)
print('%.1f' % resultado)
|
flexible
|
{
"blob_id": "6edb1f99ca9af01f28322cbaf13f278e79b94e92",
"index": 5882,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(12):\n aux = []\n for j in range(12):\n aux.append(float(input()))\n m.append(aux)\n aux = []\n<mask token>\nfor i in range(12):\n soma += m[i][c]\n<mask token>\nprint('%.1f' % resultado)\n",
"step-3": "c = int(input())\nt = input()\nm = []\nfor i in range(12):\n aux = []\n for j in range(12):\n aux.append(float(input()))\n m.append(aux)\n aux = []\nsoma = 0\nfor i in range(12):\n soma += m[i][c]\nresultado = soma / (t == 'S' and 1 or 12)\nprint('%.1f' % resultado)\n",
"step-4": "# -*- coding: utf-8 -*-\n\nc = int(input())\nt = input()\nm = []\n\nfor i in range(12):\n aux = []\n for j in range(12):\n aux.append(float(input()))\n m.append(aux)\n aux = []\n\nsoma = 0\nfor i in range(12):\n soma += m[i][c]\n\nresultado = soma / (t == 'S' and 1 or 12)\nprint('%.1f' % resultado)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sample_items(num_items, shape, random_state=None):
"""
Randomly sample a number of items.
Parameters
----------
num_items: int
Total number of items from which we should sample:
the maximum value of a sampled item id will be smaller
than this.
shape: int or tuple of ints
Shape of the sampled array.
random_state: np.random.RandomState instance, optional
Random state to use for sampling.
Returns
-------
items: np.array of shape [shape]
Sampled item ids.
"""
if random_state is None:
random_state = np.random.RandomState()
items = random_state.randint(0, num_items, shape, dtype=np.int64)
return items
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
def sample_items(num_items, shape, random_state=None):
"""
Randomly sample a number of items.
Parameters
----------
num_items: int
Total number of items from which we should sample:
the maximum value of a sampled item id will be smaller
than this.
shape: int or tuple of ints
Shape of the sampled array.
random_state: np.random.RandomState instance, optional
Random state to use for sampling.
Returns
-------
items: np.array of shape [shape]
Sampled item ids.
"""
if random_state is None:
random_state = np.random.RandomState()
items = random_state.randint(0, num_items, shape, dtype=np.int64)
return items
<|reserved_special_token_1|>
# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing functions for negative item sampling.
"""
import numpy as np
def sample_items(num_items, shape, random_state=None):
"""
Randomly sample a number of items.
Parameters
----------
num_items: int
Total number of items from which we should sample:
the maximum value of a sampled item id will be smaller
than this.
shape: int or tuple of ints
Shape of the sampled array.
random_state: np.random.RandomState instance, optional
Random state to use for sampling.
Returns
-------
items: np.array of shape [shape]
Sampled item ids.
"""
if random_state is None:
random_state = np.random.RandomState()
items = random_state.randint(0, num_items, shape, dtype=np.int64)
return items
|
flexible
|
{
"blob_id": "d414e4497bae23e4273526c0bbdecd23ed665cac",
"index": 4857,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sample_items(num_items, shape, random_state=None):\n \"\"\"\n Randomly sample a number of items.\n\n Parameters\n ----------\n\n num_items: int\n Total number of items from which we should sample:\n the maximum value of a sampled item id will be smaller\n than this.\n shape: int or tuple of ints\n Shape of the sampled array.\n random_state: np.random.RandomState instance, optional\n Random state to use for sampling.\n\n Returns\n -------\n\n items: np.array of shape [shape]\n Sampled item ids.\n \"\"\"\n if random_state is None:\n random_state = np.random.RandomState()\n items = random_state.randint(0, num_items, shape, dtype=np.int64)\n return items\n",
"step-3": "<mask token>\nimport numpy as np\n\n\ndef sample_items(num_items, shape, random_state=None):\n \"\"\"\n Randomly sample a number of items.\n\n Parameters\n ----------\n\n num_items: int\n Total number of items from which we should sample:\n the maximum value of a sampled item id will be smaller\n than this.\n shape: int or tuple of ints\n Shape of the sampled array.\n random_state: np.random.RandomState instance, optional\n Random state to use for sampling.\n\n Returns\n -------\n\n items: np.array of shape [shape]\n Sampled item ids.\n \"\"\"\n if random_state is None:\n random_state = np.random.RandomState()\n items = random_state.randint(0, num_items, shape, dtype=np.int64)\n return items\n",
"step-4": "# Copyright 2018 The Cornac Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nModule containing functions for negative item sampling.\n\"\"\"\n\nimport numpy as np\n\n\ndef sample_items(num_items, shape, random_state=None):\n \"\"\"\n Randomly sample a number of items.\n\n Parameters\n ----------\n\n num_items: int\n Total number of items from which we should sample:\n the maximum value of a sampled item id will be smaller\n than this.\n shape: int or tuple of ints\n Shape of the sampled array.\n random_state: np.random.RandomState instance, optional\n Random state to use for sampling.\n\n Returns\n -------\n\n items: np.array of shape [shape]\n Sampled item ids.\n \"\"\"\n\n if random_state is None:\n random_state = np.random.RandomState()\n\n 
items = random_state.randint(0, num_items, shape, dtype=np.int64)\n\n return items\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from job_description import JobDescription
from resume import Resume
from resume_manager import ResumeManager
|
flexible
|
{
"blob_id": "a998433e45c1d5135749c5164e8ec1f2eb0e572a",
"index": 1693,
"step-1": "<mask token>\n",
"step-2": "from job_description import JobDescription\nfrom resume import Resume\nfrom resume_manager import ResumeManager\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while len(card) > 1:
topCard = card.pop(0)
trash.append(topCard)
card.append(card.pop(0))
<|reserved_special_token_0|>
for i in range(len(trash)):
outputStr += str(trash[i]) + ' '
outputStr += str(card[0])
print(outputStr)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
N = int(input())
card = [int(x + 1) for x in range(N)]
trash = []
while len(card) > 1:
topCard = card.pop(0)
trash.append(topCard)
card.append(card.pop(0))
outputStr = ''
for i in range(len(trash)):
outputStr += str(trash[i]) + ' '
outputStr += str(card[0])
print(outputStr)
<|reserved_special_token_1|>
import sys
N = int(input())
card = [int(x + 1) for x in range(N)]
trash = []
while len(card) > 1:
topCard = card.pop(0)
trash.append(topCard)
card.append(card.pop(0))
outputStr = ''
for i in range(len(trash)):
outputStr += str(trash[i]) + ' '
outputStr += str(card[0])
print(outputStr)
<|reserved_special_token_1|>
import sys
N = int(input())
card = [int(x+1) for x in range(N)]
trash = []
while len(card)>1:
topCard = card.pop(0)
trash.append(topCard)
card.append(card.pop(0))
outputStr = ""
for i in range(len(trash)):
outputStr += str(trash[i]) + " "
outputStr += str(card[0])
print(outputStr)
|
flexible
|
{
"blob_id": "90e475dfd128689dd4e1a5375ced6e4cbfb73c07",
"index": 7860,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile len(card) > 1:\n topCard = card.pop(0)\n trash.append(topCard)\n card.append(card.pop(0))\n<mask token>\nfor i in range(len(trash)):\n outputStr += str(trash[i]) + ' '\noutputStr += str(card[0])\nprint(outputStr)\n",
"step-3": "<mask token>\nN = int(input())\ncard = [int(x + 1) for x in range(N)]\ntrash = []\nwhile len(card) > 1:\n topCard = card.pop(0)\n trash.append(topCard)\n card.append(card.pop(0))\noutputStr = ''\nfor i in range(len(trash)):\n outputStr += str(trash[i]) + ' '\noutputStr += str(card[0])\nprint(outputStr)\n",
"step-4": "import sys\nN = int(input())\ncard = [int(x + 1) for x in range(N)]\ntrash = []\nwhile len(card) > 1:\n topCard = card.pop(0)\n trash.append(topCard)\n card.append(card.pop(0))\noutputStr = ''\nfor i in range(len(trash)):\n outputStr += str(trash[i]) + ' '\noutputStr += str(card[0])\nprint(outputStr)\n",
"step-5": "import sys\n\nN = int(input())\ncard = [int(x+1) for x in range(N)]\ntrash = []\nwhile len(card)>1:\n topCard = card.pop(0)\n trash.append(topCard)\n card.append(card.pop(0))\n\noutputStr = \"\"\nfor i in range(len(trash)):\n outputStr += str(trash[i]) + \" \"\n\noutputStr += str(card[0])\nprint(outputStr)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pd.set_option('display.max_columns', None)
<|reserved_special_token_0|>
file_open.close()
<|reserved_special_token_0|>
for i in range(len(df)):
count = []
count = df2['Newappend1'][(df2['Newappend1'] > Newtaxiout_time[i]) & (
df2['Newappend1'] < time[i] * 60 + Newtaxiout_time[i])]
append1_res.append(len(count))
<|reserved_special_token_0|>
for i in range(len(df)):
count = []
count = df2['Newappend2'][(df2['Newappend2'] > Newtaxiout_time[i]) & (
df2['Newappend2'] < time[i] * 60 + Newtaxiout_time[i])]
append2_res.append(len(count))
<|reserved_special_token_0|>
df.to_csv('df_11_9.csv', index=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pd.set_option('display.max_columns', None)
file_name = 'data_11_8.csv'
file_open = open(file_name)
df = pd.read_csv(file_open)
file_open.close()
Newtaxiout_time = df['Newtaxiout_time']
time = df['time']
file_name2 = 'df_append.csv'
file_open2 = open(file_name2)
df2 = pd.read_csv(file_open2)
append1_res = []
for i in range(len(df)):
count = []
count = df2['Newappend1'][(df2['Newappend1'] > Newtaxiout_time[i]) & (
df2['Newappend1'] < time[i] * 60 + Newtaxiout_time[i])]
append1_res.append(len(count))
append2_res = []
for i in range(len(df)):
count = []
count = df2['Newappend2'][(df2['Newappend2'] > Newtaxiout_time[i]) & (
df2['Newappend2'] < time[i] * 60 + Newtaxiout_time[i])]
append2_res.append(len(count))
df['append1_res'] = append1_res
df['append2_res'] = append2_res
df.to_csv('df_11_9.csv', index=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pandas as pd
pd.set_option('display.max_columns', None)
file_name = 'data_11_8.csv'
file_open = open(file_name)
df = pd.read_csv(file_open)
file_open.close()
Newtaxiout_time = df['Newtaxiout_time']
time = df['time']
file_name2 = 'df_append.csv'
file_open2 = open(file_name2)
df2 = pd.read_csv(file_open2)
append1_res = []
for i in range(len(df)):
count = []
count = df2['Newappend1'][(df2['Newappend1'] > Newtaxiout_time[i]) & (
df2['Newappend1'] < time[i] * 60 + Newtaxiout_time[i])]
append1_res.append(len(count))
append2_res = []
for i in range(len(df)):
count = []
count = df2['Newappend2'][(df2['Newappend2'] > Newtaxiout_time[i]) & (
df2['Newappend2'] < time[i] * 60 + Newtaxiout_time[i])]
append2_res.append(len(count))
df['append1_res'] = append1_res
df['append2_res'] = append2_res
df.to_csv('df_11_9.csv', index=False)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 17:14:14 2018
@author: Winry
"""
import pandas as pd
# 显示所有的列
pd.set_option('display.max_columns', None)
# 读取数据
file_name = "data_11_8.csv"
file_open = open(file_name)
df = pd.read_csv(file_open)
file_open.close()
Newtaxiout_time = df['Newtaxiout_time']
time = df['time']
file_name2 = "df_append.csv"
file_open2 = open(file_name2)
df2 = pd.read_csv(file_open2)
# append1
append1_res = []
for i in range(len(df)):
count = []
count = df2["Newappend1"][(df2["Newappend1"] > Newtaxiout_time[i]) & (df2["Newappend1"] < time[i]*60+Newtaxiout_time[i])]
append1_res.append(len(count))
# append2
append2_res = []
for i in range(len(df)):
count = []
count = df2["Newappend2"][(df2["Newappend2"] > Newtaxiout_time[i]) & (df2["Newappend2"] < time[i]*60+Newtaxiout_time[i])]
append2_res.append(len(count))
df['append1_res'] = append1_res
df['append2_res'] = append2_res
df.to_csv('df_11_9.csv',index=False)
|
flexible
|
{
"blob_id": "f5a474cdc8aa22322b252b980c0334a9db21bd5c",
"index": 9300,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npd.set_option('display.max_columns', None)\n<mask token>\nfile_open.close()\n<mask token>\nfor i in range(len(df)):\n count = []\n count = df2['Newappend1'][(df2['Newappend1'] > Newtaxiout_time[i]) & (\n df2['Newappend1'] < time[i] * 60 + Newtaxiout_time[i])]\n append1_res.append(len(count))\n<mask token>\nfor i in range(len(df)):\n count = []\n count = df2['Newappend2'][(df2['Newappend2'] > Newtaxiout_time[i]) & (\n df2['Newappend2'] < time[i] * 60 + Newtaxiout_time[i])]\n append2_res.append(len(count))\n<mask token>\ndf.to_csv('df_11_9.csv', index=False)\n",
"step-3": "<mask token>\npd.set_option('display.max_columns', None)\nfile_name = 'data_11_8.csv'\nfile_open = open(file_name)\ndf = pd.read_csv(file_open)\nfile_open.close()\nNewtaxiout_time = df['Newtaxiout_time']\ntime = df['time']\nfile_name2 = 'df_append.csv'\nfile_open2 = open(file_name2)\ndf2 = pd.read_csv(file_open2)\nappend1_res = []\nfor i in range(len(df)):\n count = []\n count = df2['Newappend1'][(df2['Newappend1'] > Newtaxiout_time[i]) & (\n df2['Newappend1'] < time[i] * 60 + Newtaxiout_time[i])]\n append1_res.append(len(count))\nappend2_res = []\nfor i in range(len(df)):\n count = []\n count = df2['Newappend2'][(df2['Newappend2'] > Newtaxiout_time[i]) & (\n df2['Newappend2'] < time[i] * 60 + Newtaxiout_time[i])]\n append2_res.append(len(count))\ndf['append1_res'] = append1_res\ndf['append2_res'] = append2_res\ndf.to_csv('df_11_9.csv', index=False)\n",
"step-4": "<mask token>\nimport pandas as pd\npd.set_option('display.max_columns', None)\nfile_name = 'data_11_8.csv'\nfile_open = open(file_name)\ndf = pd.read_csv(file_open)\nfile_open.close()\nNewtaxiout_time = df['Newtaxiout_time']\ntime = df['time']\nfile_name2 = 'df_append.csv'\nfile_open2 = open(file_name2)\ndf2 = pd.read_csv(file_open2)\nappend1_res = []\nfor i in range(len(df)):\n count = []\n count = df2['Newappend1'][(df2['Newappend1'] > Newtaxiout_time[i]) & (\n df2['Newappend1'] < time[i] * 60 + Newtaxiout_time[i])]\n append1_res.append(len(count))\nappend2_res = []\nfor i in range(len(df)):\n count = []\n count = df2['Newappend2'][(df2['Newappend2'] > Newtaxiout_time[i]) & (\n df2['Newappend2'] < time[i] * 60 + Newtaxiout_time[i])]\n append2_res.append(len(count))\ndf['append1_res'] = append1_res\ndf['append2_res'] = append2_res\ndf.to_csv('df_11_9.csv', index=False)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 8 17:14:14 2018\n\n@author: Winry\n\"\"\"\n\nimport pandas as pd\n# 显示所有的列\npd.set_option('display.max_columns', None)\n\n# 读取数据\nfile_name = \"data_11_8.csv\"\nfile_open = open(file_name)\ndf = pd.read_csv(file_open)\nfile_open.close()\n\nNewtaxiout_time = df['Newtaxiout_time']\ntime = df['time']\n\nfile_name2 = \"df_append.csv\"\nfile_open2 = open(file_name2)\ndf2 = pd.read_csv(file_open2)\n\n# append1\n\nappend1_res = []\nfor i in range(len(df)):\n count = []\n count = df2[\"Newappend1\"][(df2[\"Newappend1\"] > Newtaxiout_time[i]) & (df2[\"Newappend1\"] < time[i]*60+Newtaxiout_time[i])]\n append1_res.append(len(count))\n\n\n# append2\nappend2_res = []\nfor i in range(len(df)):\n count = []\n count = df2[\"Newappend2\"][(df2[\"Newappend2\"] > Newtaxiout_time[i]) & (df2[\"Newappend2\"] < time[i]*60+Newtaxiout_time[i])]\n append2_res.append(len(count))\n \ndf['append1_res'] = append1_res\ndf['append2_res'] = append2_res\ndf.to_csv('df_11_9.csv',index=False)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from SMP.motion_planner.node import PriorityNode
import numpy as np
from heapq import nsmallest
import sys
from SMP.motion_planner.plot_config import DefaultPlotConfig
from SMP.motion_planner.search_algorithms.best_first_search import GreedyBestFirstSearch
# imports for route planner:
class StudentMotionPlanner(GreedyBestFirstSearch):
"""
Motion planner implementation by students.
Note that you may inherit from any given motion planner as you wish, or come up with your own planner.
Here as an example, the planner is inherited from the GreedyBestFirstSearch planner.
"""
    def __init__(self, scenario, planningProblem, automata, plot_config=DefaultPlotConfig):
        """
        Initialize the student planner.

        All arguments are forwarded unchanged to the ``GreedyBestFirstSearch``
        base class; note that ``automata`` is passed on under the base
        class's ``automaton`` keyword.

        :param scenario: scenario forwarded to the base planner
        :param planningProblem: planning problem forwarded to the base planner
        :param automata: maneuver automaton, forwarded as ``automaton``
        :param plot_config: plot configuration (defaults to ``DefaultPlotConfig``)
        """
        super().__init__(scenario=scenario, planningProblem=planningProblem, automaton=automata,
                         plot_config=plot_config)
def evaluation_function(self, node_current: PriorityNode) -> float:
########################################################################
# todo: Implement your own evaluation function here. #
########################################################################
# Copied from greedy best first search:
"""
Evaluation function of GBFS is f(n) = h(n)
"""
node_current.priority = self.heuristic_function(node_current=node_current)
return node_current.priority
def heuristic_function(self, node_current: PriorityNode) -> float:
########################################################################
# todo: Implement your own heuristic cost calculation here. #
# Hint: #
# Use the State of the current node and the information from the #
# planning problem, as well as from the scenario. #
# Some helper functions for your convenience can be found in #
# ./search_algorithms/base_class.py #
########################################################################
"""
Function that evaluates the heuristic cost h(n) in student class.
Created by Mohamed A. Abdellaoui 10.01.2021
"""
output_logs = False
if output_logs:
print("##################")
print("current time step: ", node_current.list_paths[-1][-1].time_step)
print("current problem mode", self.planningProblemType)
print("depth tree: ", node_current.depth_tree)
currentorient = node_current.list_paths[-1][-1].orientation
currentpos = node_current.list_paths[-1][-1].position
currenttimestep = node_current.list_paths[-1][-1].time_step
currentVel = node_current.list_paths[-1][-1].velocity
# Test if reached goal:
if self.reached_goal(node_current.list_paths[-1]):
return 0.0
# Test if route planner failed to find a path:
if self.routeplannerresult is None:
return np.inf
############ Detect cars in front:
# calc cost based on distance to gool following the refrence path:
# loop through all obstacles at time step x and find if any is close of current pos:
if not self.disableObstAvoidance:
for obst in self.list_obstacles:
obstPos = obst.state_at_time(currenttimestep)
if currentorient is not None and obstPos is not None:
disttoobst = self.euclidean_distance(currentpos, obstPos.position)
lookaheadVar = 1.375 * currentVel + 2.5
if disttoobst <= lookaheadVar:
# calc orientation diff between car and obstacle:
vectorToObst = np.array([currentpos, obstPos.position])
vectorToObstOrient = self.calc_angle_of_position(vectorToObst, currentpos)
orientdiff = self.calc_orientation_diff(currentorient, vectorToObstOrient)
if abs(orientdiff) <= 0.261799:
if not 'velocity' in obstPos.attributes:
continue
if node_current.list_paths[-1][-1].velocity > obstPos.velocity and obstPos.velocity != 0:
return np.inf
# get index of closest object to the ego vehicle:
index_smallest_dist = self.get_index_nearest_obst_infront(node_current)
# use the index to locate vehicle to calc cost:
if index_smallest_dist != -1:
# found the index of vehicle with smallest distance to ego car:
obst = self.list_obstacles[index_smallest_dist]
obstPos = obst.state_at_time(currenttimestep)
if obstPos is not None and 'velocity' in obstPos.attributes:
if obstPos.velocity == 0:
cost = node_current.list_paths[-1][-1].velocity
return cost
if node_current.list_paths[-1][-1].velocity > obstPos.velocity:
return np.inf
cost = abs(node_current.list_paths[-1][-1].velocity - obstPos.velocity)
return cost
#########################################################
# Decide based on planning problem type how to calculate cost
if self.planningProblemType == 'ModeA':
# Call function for planning problem with desired time, position, speed and orientation
cost = self.cost_for_modeA_problem(node_current, output_logs)
if output_logs:
print("Cost from modeA cost func: ", cost)
if cost < 0:
return 0
return cost
elif self.planningProblemType == 'ModeB':
# Call function for planning problem with desired time, position and velocity:
cost = self.cost_for_modeB_problem(node_current, output_logs)
if output_logs:
print("Cost from modeB cost func: ", cost)
if cost < 0:
return 0
return cost
elif self.planningProblemType == 'ModeC':
# Call function for planning problem with desired time, position and orientation:
cost = self.cost_for_modeC_problem(node_current, output_logs)
if output_logs:
print("Cost from modeB cost func: ", cost)
if cost < 0:
return 0
return cost
elif self.planningProblemType == 'ModeD':
# Call function for planning problem with desired time and position:
cost = self.cost_for_modeD_problem(node_current, output_logs)
if output_logs:
print("Cost from modeB cost func: ", cost)
if cost < 0:
return 0
return cost
elif self.planningProblemType == 'Survival':
# Call function for planning problem with desired time:
cost = self.cost_for_Survival_problem(node_current, output_logs)
if output_logs:
print("Cost from modeB cost func: ", cost)
if cost < 0:
return 0
return cost
def cost_for_modeA_problem(self, node_current, output_logs):
# Function for planning problem with desired time, position, speed and orientation
if self.position_desired is None:
if output_logs:
print("exit Cost function because position desired is None!")
return self.time_desired.start - node_current.list_paths[-1][-1].time_step
else:
velocity = node_current.list_paths[-1][-1].velocity
path_last = node_current.list_paths[-1]
if np.isclose(velocity, 0):
return np.inf
else:
# Calc Variables:
distance = self.calc_euclidean_distance(current_node=node_current)
angleToGoal = self.calc_angle_to_goal(path_last[-1])
orientationToGoalDiff = self.calc_orientation_diff(angleToGoal, path_last[-1].orientation)
orientationToGoalDiffdegree = (abs(orientationToGoalDiff) * 180) / 3.14
desired_orient = (self.orientation_desired.end + self.orientation_desired.start) / 2
desired_velocity = (self.velocity_desired.start + self.velocity_desired.end) / 2
diff_desiredOrient = abs(self.calc_orientation_diff(desired_orient, path_last[-1].orientation))
diff_deiredVelocity = abs(velocity - desired_velocity)
angle_intervall = abs(abs(self.orientation_desired.start) - abs(self.orientation_desired.end))
# Output data for debugging:
if output_logs:
print("Distance to goal of current node is: ", distance)
print("Velocity of current node is: ", velocity)
print("Orientation of current position: ", node_current.list_paths[-1][-1].orientation)
print("Angle to goal of current node is: ", angleToGoal)
print("orientation diff to goal of current node is(deg): ", orientationToGoalDiffdegree)
print("diff desired orient of current node is(deg): ", diff_desiredOrient)
print("diff desired velocity of current node is(deg): ", diff_deiredVelocity)
# test 16.01:
current_orient = path_last[-1].orientation
if distance <= 10:
if current_orient < self.orientation_desired.start or current_orient > self.orientation_desired.end:
return np.inf
if velocity < self.velocity_desired.start or velocity > self.velocity_desired.end:
return np.inf
weight = 10
# if very colse to goal, minimize the diff velocity and diff orient
cost = (distance / velocity) + weight* diff_deiredVelocity + weight* diff_desiredOrient
#cost = distance + diff_desiredOrient + diff_deiredVelocity
return cost
def cost_for_modeB_problem(self, node_current, output_logs):
# Function for planning problem with desired time, position, speed
if self.position_desired is None:
if output_logs:
print("exit Cost function because position desired is None!")
return self.time_desired.start - node_current.list_paths[-1][-1].time_step
else:
velocity = node_current.list_paths[-1][-1].velocity
path_last = node_current.list_paths[-1]
if np.isclose(velocity, 0):
return np.inf
else:
# Calc Variables:
distance = self.calc_distance_to_goal_from_point(node_current.list_paths[-1][-1])
angleToGoal = self.calc_angle_to_goal(path_last[-1])
orientationToGoalDiff = self.calc_orientation_diff(angleToGoal, path_last[-1].orientation)
orientationToGoalDiffdegree = (abs(orientationToGoalDiff)*180)/3.14
desired_velocity = (self.velocity_desired.start + self.velocity_desired.end)/2
diff_deiredVelocity = abs(velocity - desired_velocity)
self.test_if_in_goal_lanelet(node_current)
# Output data for debugging:
if output_logs:
print("Distance to goal of current node is: ", distance)
print("Velocity of current node is: ", velocity)
print("Orientation of current position: ",node_current.list_paths[-1][-1].orientation)
print("Angle to goal of current node is: ", angleToGoal)
print("orientation diff to goal of current node is(deg): ", orientationToGoalDiffdegree)
print("diff desired velocity of current node is(deg): ", diff_deiredVelocity)
# If very close to target but time is still not reached:
#if distance <= 0.1 and node_current.list_paths[-1][-1].time_step < self.time_desired.start:
# return self.time_desired.start - node_current.list_paths[-1][-1].time_step * 0.01
if self.planningProblem.goal.is_reached_only_pos(node_current.list_paths[-1][-1]):
cost = (self.time_desired.start - node_current.list_paths[-1][-1].time_step) * 0.01
cost = cost + diff_deiredVelocity + velocity *0.01
return cost
cost = ( distance / velocity ) + 2 * diff_deiredVelocity + velocity*0.01
return cost
def cost_for_modeC_problem(self, node_current, output_logs):
# Function for planning problem with desired time, position, speed and orientation
if self.position_desired is None:
if output_logs:
print("exit Cost function because position desired is None!")
return self.time_desired.start - node_current.list_paths[-1][-1].time_step
else:
velocity = node_current.list_paths[-1][-1].velocity
path_last = node_current.list_paths[-1]
if np.isclose(velocity, 0):
return np.inf
else:
# Calc Variables:
distance = self.calc_euclidean_distance(current_node=node_current)
angleToGoal = self.calc_angle_to_goal(path_last[-1])
orientationToGoalDiff = self.calc_orientation_diff(angleToGoal, path_last[-1].orientation)
orientationToGoalDiffdegree = (abs(orientationToGoalDiff)*180)/3.14
desired_orient = (self.orientation_desired.end + self.orientation_desired.start) / 2
diff_desiredOrient = self.calc_orientation_diff(desired_orient, path_last[-1].orientation)
angle_intervall = abs(abs(self.orientation_desired.start) - abs(self.orientation_desired.end))
# Calcualte distance between currrent position and reference path:
arry = node_current.list_paths[-1][-1].position
a = np.array([arry[0], arry[1]])
if self.routeplannerresult is not None:
distance_to_refrence = self.calc_distance_to_nearest_point(self.routeplannerresult.reference_path,
a)
else:
distance_to_refrence = 0
# Output data for debugging:
if output_logs:
print("distance to reference path: ", distance_to_refrence)
print("Distance to goal of current node is: ", distance)
print("Velocity of current node is: ", velocity)
print("Orientation of current position: ",node_current.list_paths[-1][-1].orientation)
print("Angle to goal of current node is: ", angleToGoal)
print("orientation diff to goal of current node is(deg): ", orientationToGoalDiffdegree)
print("diff desired orient of current node is(deg): ", diff_desiredOrient)
# If very close to target but time is still not reached:
if distance <= 0.1 and node_current.list_paths[-1][-1].time_step < self.time_desired.start:
return self.time_desired.start - node_current.list_paths[-1][-1].time_step
if self.planningProblem.goal.is_reached_only_pos(node_current.list_paths[-1][-1]):
cost = (self.time_desired.start - node_current.list_paths[-1][-1].time_step) * 0.01
cost = cost + diff_desiredOrient + velocity *0.01
return cost
cost = ( distance / velocity ) + 2 * diff_desiredOrient + velocity*0.01
return cost
def cost_for_modeD_problem(self, node_current, output_logs):
totaltogoal = self.calc_distance_to_goal_from_point(node_current.list_paths[-1][-1])
if self.position_desired is None:
if output_logs:
print("exit Cost function because position desired is None!")
return self.time_desired.start - node_current.list_paths[-1][-1].time_step
else:
if self.planningProblem.goal.is_reached_only_pos(node_current.list_paths[-1][-1]):
return (self.time_desired.start - node_current.list_paths[-1][-1].time_step) *0.01
velocity = node_current.list_paths[-1][-1].velocity
if np.isclose(velocity, 0):
return np.inf
cost = totaltogoal / node_current.list_paths[-1][-1].velocity
return cost
def cost_for_Survival_problem(self, node_current, output_logs):
currentorient = node_current.list_paths[-1][-1].orientation
currentpos = node_current.list_paths[-1][-1].position
currenttimestep = node_current.list_paths[-1][-1].time_step
currentVel = node_current.list_paths[-1][-1].velocity
for obst in self.list_obstacles:
obstPos = obst.state_at_time(currenttimestep)
if currentorient is not None and obstPos is not None:
disttoobst = self.euclidean_distance(currentpos, obstPos.position)
lookaheadVar = 1.375 * currentVel + 2.5
if disttoobst <= lookaheadVar:
# calc orientation diff between car and obstacle:
vectorToObst = np.array([currentpos, obstPos.position])
vectorToObstOrient = self.calc_angle_of_position(vectorToObst, currentpos)
orientdiff = self.calc_orientation_diff(currentorient, vectorToObstOrient)
if abs(orientdiff) <= 0.261799:
if not 'velocity' in obstPos.attributes:
continue
if node_current.list_paths[-1][-1].velocity > obstPos.velocity and obstPos.velocity != 0:
return np.inf
return self.time_desired.start - node_current.list_paths[-1][-1].time_step
def calc_distance_to_ref_from_point(self, state):
#calc distance of points to each point of refrence path:
currentpos = state.position
distances = []
for p in self.refPathParsedPnts:
distances.append(self.euclidean_distance(currentpos, p))
smallest_points = nsmallest(2, distances)
index1 = distances.index(smallest_points[0])
index2 = distances.index(smallest_points[1])
p1 = self.refPathParsedPnts[index1]
p2 = self.refPathParsedPnts[index2]
distance_to_refrence = np.abs(np.cross(p2 - p1, currentpos - p1) / np.linalg.norm(p2 - p1))
return distance_to_refrence
def calc_distance_to_goal_from_point(self, state):
#calc distance of points to each point of refrence path:
currentpos = state.position
distances = []
for p in self.refPathParsedPnts:
distances.append(self.euclidean_distance(currentpos, p))
index_smallest_dist = distances.index(min(distances))
totaltogoal = 0
for p in range(index_smallest_dist, len(self.refPathParsedPnts) - 1):
totaltogoal = totaltogoal + self.euclidean_distance(self.refPathParsedPnts[p],self.refPathParsedPnts[p+1])
return totaltogoal
def get_index_nearest_obst_infront(self,node_current):
# loop through all obstacles at time step x and find if any is close of current pos:
currentorient = node_current.list_paths[-1][-1].orientation
currentpos = node_current.list_paths[-1][-1].position
currenttimestep = node_current.list_paths[-1][-1].time_step
currentVel = node_current.list_paths[-1][-1].velocity
disttoobst = [np.inf] * len(self.list_obstacles)
for i in range(len(self.list_obstacles)):
obst = self.list_obstacles[i]
obstPos = obst.state_at_time(currenttimestep)
if currentorient is not None and obstPos is not None:
dist = self.euclidean_distance(currentpos, obstPos.position)
lookaheadVar = 1.375 * currentVel + 2.5
if dist <= lookaheadVar:
# calc orientation diff between car and obstacle:
vectorToObst = np.array([currentpos, obstPos.position])
vectorToObstOrient = self.calc_angle_of_position(vectorToObst, currentpos)
orientdiff = self.calc_orientation_diff(currentorient, vectorToObstOrient)
if abs(orientdiff) <= 0.261799:
disttoobst[i]= dist
else:
disttoobst[i]= np.inf
else:
disttoobst[i]= np.inf
index_smallest_dist = disttoobst.index(min(disttoobst))
if disttoobst[index_smallest_dist] == np.inf:
index_smallest_dist = -1
return index_smallest_dist
def test_if_in_goal_lanelet(self, node_current):
pos = [node_current.list_paths[-1][-1].position]
currentlanelet = self.scenario.lanelet_network.find_lanelet_by_position(pos)
currentlanelet = currentlanelet[0][0]
#result = self.is_goal_in_lane(currentlanelet)
result = False
if self.planningProblem.goal.lanelets_of_goal_position is not None:
if currentlanelet in self.planningProblem.goal.lanelets_of_goal_position.get(0):
result = True
return result
    def cost_for_modeA_problem_old(self, node_current, output_logs):
        """Legacy cost function for problems with desired time, position, speed and orientation.

        Superseded by ``cost_for_modeA_problem``; kept for reference. Combines
        a travel-time estimate (distance / velocity) with weighted penalties on
        the deviation from the desired velocity/orientation and branches on the
        distance to the goal and on whether the goal lies inside a 45-degree
        field of view of the ego heading.

        :param node_current: search node; the last state of its last path is evaluated
        :param output_logs: if True, print debug information about the evaluation
        :return: cost value, or np.inf for infeasible nodes
        """
        # Function for planning problem with desired time, position, speed and orientation
        if self.position_desired is None:
            if output_logs:
                print("exit Cost function because position desired is None!")
            # No position goal: only the remaining time to the desired time matters.
            return self.time_desired.start - node_current.list_paths[-1][-1].time_step
        else:
            velocity = node_current.list_paths[-1][-1].velocity
            path_last = node_current.list_paths[-1]
            if np.isclose(velocity, 0):
                # A standing vehicle cannot make progress towards the goal.
                return np.inf
            else:
                # Calc Variables:
                distance = self.calc_euclidean_distance(current_node=node_current)
                angleToGoal = self.calc_angle_to_goal(path_last[-1])
                orientationToGoalDiff = self.calc_orientation_diff(angleToGoal, path_last[-1].orientation)
                # NOTE(review): 3.14 is used instead of math.pi for the rad->deg conversion.
                orientationToGoalDiffdegree = (abs(orientationToGoalDiff)*180)/3.14
                # Mid points of the desired orientation / velocity intervals.
                desired_orient = (self.orientation_desired.end + self.orientation_desired.start) / 2
                desired_velocity = (self.velocity_desired.start + self.velocity_desired.end)/2
                # NOTE(review): signed difference here (no abs), unlike cost_for_modeA_problem.
                diff_desiredOrient = self.calc_orientation_diff(desired_orient, path_last[-1].orientation)
                diff_deiredVelocity = abs(velocity - desired_velocity)
                # Width of the desired orientation interval (used as a gate below).
                angle_intervall = abs(abs(self.orientation_desired.start) - abs(self.orientation_desired.end))
                # Output data for debugging:
                if output_logs:
                    print("Distance to goal of current node is: ", distance)
                    print("Velocity of current node is: ", velocity)
                    print("Orientation of current position: ",node_current.list_paths[-1][-1].orientation)
                    print("Angle to goal of current node is: ", angleToGoal)
                    print("orientation diff to goal of current node is(deg): ", orientationToGoalDiffdegree)
                    print("diff desired orient of current node is(deg): ", diff_desiredOrient)
                    print("diff desired velocity of current node is(deg): ", diff_deiredVelocity)
                # if very close to goal, minimize the diff velocity and diff orient
                if distance <= 1:
                    desired_vel_weight = 1
                    desired_orient_weight = 1
                    cost = desired_vel_weight * diff_deiredVelocity
                    if angle_intervall < 1 and angle_intervall != 0:
                        cost = cost + desired_orient_weight * diff_desiredOrient
                    return cost
                # If very close to target but time is still not reached:
                # NOTE(review): unreachable — distance <= 0.1 is already covered
                # by the `distance <= 1` branch above.
                if distance <= 0.1 and node_current.list_paths[-1][-1].time_step < self.time_desired.start:
                    return (self.time_desired.start - node_current.list_paths[-1][-1].time_step) *0.001
                # check if goal is in field of view:
                if orientationToGoalDiffdegree > 45:
                    # goal is not in field of view:
                    # give more weight to speed and follow reference path blindly:
                    # block to differentiate between large distance to goal and small distance:
                    if distance >= 10:  # too far away from target, just follow the least distance and target lanelet.
                        # NOTE(review): velocity_weight is assigned but unused in this branch.
                        velocity_weight = 1
                        cost = distance / velocity
                        return cost
                    if distance < 10 and distance >= 5:  # almost close, reduce speed.
                        return np.inf
                    if distance < 5:  # very close andjust orientation angle..
                        return np.inf
                else:
                    # goal is in field of view:
                    # give more weight to distance and speed and orientation goals:
                    # block to differentiate between large distance to goal and small distance:
                    if distance >= 10:  # too far away from target, just follow the least distance and target lanelet.
                        velocity_weight = 1
                        cost = distance / velocity * velocity_weight
                        return cost
                    if distance < 10 and distance >= 5:  # almost close, reduce speed.
                        # NOTE(review): velocity_weight is assigned but unused in this branch.
                        velocity_weight = 0.5
                        desired_vel_weight = 1
                        desired_orient_weight = 1
                        cost = distance / velocity
                        cost = cost + desired_vel_weight * diff_deiredVelocity
                        if angle_intervall < 1 and angle_intervall != 0:
                            cost = cost + desired_orient_weight * diff_desiredOrient
                        return cost
                    if distance < 5:  # very close andjust orientation angle..
                        cost = distance / velocity
                        desired_vel_weight = 3
                        desired_orient_weight = 3
                        cost = cost + desired_vel_weight * diff_deiredVelocity
                        if angle_intervall < 1 and angle_intervall != 0:
                            cost = cost + desired_orient_weight * diff_desiredOrient
                        return cost
|
normal
|
{
"blob_id": "6ecbe119c8a14776373d165dc05e81f91084893c",
"index": 4229,
"step-1": "<mask token>\n\n\nclass StudentMotionPlanner(GreedyBestFirstSearch):\n <mask token>\n\n def __init__(self, scenario, planningProblem, automata, plot_config=\n DefaultPlotConfig):\n super().__init__(scenario=scenario, planningProblem=planningProblem,\n automaton=automata, plot_config=plot_config)\n\n def evaluation_function(self, node_current: PriorityNode) ->float:\n \"\"\"\n Evaluation function of GBFS is f(n) = h(n)\n \"\"\"\n node_current.priority = self.heuristic_function(node_current=\n node_current)\n return node_current.priority\n <mask token>\n <mask token>\n <mask token>\n\n def cost_for_modeC_problem(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_euclidean_distance(current_node=\n node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_orient = (self.orientation_desired.end + self.\n orientation_desired.start) / 2\n diff_desiredOrient = self.calc_orientation_diff(desired_orient,\n path_last[-1].orientation)\n angle_intervall = abs(abs(self.orientation_desired.start) -\n abs(self.orientation_desired.end))\n arry = node_current.list_paths[-1][-1].position\n a = np.array([arry[0], arry[1]])\n if self.routeplannerresult is not None:\n distance_to_refrence = self.calc_distance_to_nearest_point(\n self.routeplannerresult.reference_path, a)\n else:\n distance_to_refrence = 0\n if output_logs:\n print('distance to reference path: ', distance_to_refrence)\n print('Distance to goal of current node is: ', 
distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired orient of current node is(deg): ',\n diff_desiredOrient)\n if distance <= 0.1 and node_current.list_paths[-1][-1\n ].time_step < self.time_desired.start:\n return self.time_desired.start - node_current.list_paths[-1\n ][-1].time_step\n if self.planningProblem.goal.is_reached_only_pos(node_current\n .list_paths[-1][-1]):\n cost = (self.time_desired.start - node_current.\n list_paths[-1][-1].time_step) * 0.01\n cost = cost + diff_desiredOrient + velocity * 0.01\n return cost\n cost = (distance / velocity + 2 * diff_desiredOrient + \n velocity * 0.01)\n return cost\n\n def cost_for_modeD_problem(self, node_current, output_logs):\n totaltogoal = self.calc_distance_to_goal_from_point(node_current.\n list_paths[-1][-1])\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n if self.planningProblem.goal.is_reached_only_pos(node_current.\n list_paths[-1][-1]):\n return (self.time_desired.start - node_current.list_paths[-\n 1][-1].time_step) * 0.01\n velocity = node_current.list_paths[-1][-1].velocity\n if np.isclose(velocity, 0):\n return np.inf\n cost = totaltogoal / node_current.list_paths[-1][-1].velocity\n return cost\n <mask token>\n\n def calc_distance_to_ref_from_point(self, state):\n currentpos = state.position\n distances = []\n for p in self.refPathParsedPnts:\n distances.append(self.euclidean_distance(currentpos, p))\n smallest_points = nsmallest(2, distances)\n index1 = distances.index(smallest_points[0])\n index2 = distances.index(smallest_points[1])\n p1 = 
self.refPathParsedPnts[index1]\n p2 = self.refPathParsedPnts[index2]\n distance_to_refrence = np.abs(np.cross(p2 - p1, currentpos - p1) /\n np.linalg.norm(p2 - p1))\n return distance_to_refrence\n\n def calc_distance_to_goal_from_point(self, state):\n currentpos = state.position\n distances = []\n for p in self.refPathParsedPnts:\n distances.append(self.euclidean_distance(currentpos, p))\n index_smallest_dist = distances.index(min(distances))\n totaltogoal = 0\n for p in range(index_smallest_dist, len(self.refPathParsedPnts) - 1):\n totaltogoal = totaltogoal + self.euclidean_distance(self.\n refPathParsedPnts[p], self.refPathParsedPnts[p + 1])\n return totaltogoal\n\n def get_index_nearest_obst_infront(self, node_current):\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = node_current.list_paths[-1][-1].velocity\n disttoobst = [np.inf] * len(self.list_obstacles)\n for i in range(len(self.list_obstacles)):\n obst = self.list_obstacles[i]\n obstPos = obst.state_at_time(currenttimestep)\n if currentorient is not None and obstPos is not None:\n dist = self.euclidean_distance(currentpos, obstPos.position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if dist <= lookaheadVar:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(\n vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient,\n vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n disttoobst[i] = dist\n else:\n disttoobst[i] = np.inf\n else:\n disttoobst[i] = np.inf\n index_smallest_dist = disttoobst.index(min(disttoobst))\n if disttoobst[index_smallest_dist] == np.inf:\n index_smallest_dist = -1\n return index_smallest_dist\n <mask token>\n\n def cost_for_modeA_problem_old(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function 
because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_euclidean_distance(current_node=\n node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_orient = (self.orientation_desired.end + self.\n orientation_desired.start) / 2\n desired_velocity = (self.velocity_desired.start + self.\n velocity_desired.end) / 2\n diff_desiredOrient = self.calc_orientation_diff(desired_orient,\n path_last[-1].orientation)\n diff_deiredVelocity = abs(velocity - desired_velocity)\n angle_intervall = abs(abs(self.orientation_desired.start) -\n abs(self.orientation_desired.end))\n if output_logs:\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired orient of current node is(deg): ',\n diff_desiredOrient)\n print('diff desired velocity of current node is(deg): ',\n diff_deiredVelocity)\n if distance <= 1:\n desired_vel_weight = 1\n desired_orient_weight = 1\n cost = desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = (cost + desired_orient_weight *\n diff_desiredOrient)\n return cost\n if distance <= 0.1 and node_current.list_paths[-1][-1\n ].time_step < self.time_desired.start:\n return (self.time_desired.start - node_current.\n list_paths[-1][-1].time_step) * 0.001\n if 
orientationToGoalDiffdegree > 45:\n if distance >= 10:\n velocity_weight = 1\n cost = distance / velocity\n return cost\n if distance < 10 and distance >= 5:\n return np.inf\n if distance < 5:\n return np.inf\n else:\n if distance >= 10:\n velocity_weight = 1\n cost = distance / velocity * velocity_weight\n return cost\n if distance < 10 and distance >= 5:\n velocity_weight = 0.5\n desired_vel_weight = 1\n desired_orient_weight = 1\n cost = distance / velocity\n cost = cost + desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = (cost + desired_orient_weight *\n diff_desiredOrient)\n return cost\n if distance < 5:\n cost = distance / velocity\n desired_vel_weight = 3\n desired_orient_weight = 3\n cost = cost + desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = (cost + desired_orient_weight *\n diff_desiredOrient)\n return cost\n",
"step-2": "<mask token>\n\n\nclass StudentMotionPlanner(GreedyBestFirstSearch):\n <mask token>\n\n def __init__(self, scenario, planningProblem, automata, plot_config=\n DefaultPlotConfig):\n super().__init__(scenario=scenario, planningProblem=planningProblem,\n automaton=automata, plot_config=plot_config)\n\n def evaluation_function(self, node_current: PriorityNode) ->float:\n \"\"\"\n Evaluation function of GBFS is f(n) = h(n)\n \"\"\"\n node_current.priority = self.heuristic_function(node_current=\n node_current)\n return node_current.priority\n <mask token>\n\n def cost_for_modeA_problem(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_euclidean_distance(current_node=\n node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_orient = (self.orientation_desired.end + self.\n orientation_desired.start) / 2\n desired_velocity = (self.velocity_desired.start + self.\n velocity_desired.end) / 2\n diff_desiredOrient = abs(self.calc_orientation_diff(\n desired_orient, path_last[-1].orientation))\n diff_deiredVelocity = abs(velocity - desired_velocity)\n angle_intervall = abs(abs(self.orientation_desired.start) -\n abs(self.orientation_desired.end))\n if output_logs:\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', 
angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired orient of current node is(deg): ',\n diff_desiredOrient)\n print('diff desired velocity of current node is(deg): ',\n diff_deiredVelocity)\n current_orient = path_last[-1].orientation\n if distance <= 10:\n if (current_orient < self.orientation_desired.start or \n current_orient > self.orientation_desired.end):\n return np.inf\n if (velocity < self.velocity_desired.start or velocity >\n self.velocity_desired.end):\n return np.inf\n weight = 10\n cost = (distance / velocity + weight * diff_deiredVelocity +\n weight * diff_desiredOrient)\n return cost\n\n def cost_for_modeB_problem(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_distance_to_goal_from_point(node_current\n .list_paths[-1][-1])\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_velocity = (self.velocity_desired.start + self.\n velocity_desired.end) / 2\n diff_deiredVelocity = abs(velocity - desired_velocity)\n self.test_if_in_goal_lanelet(node_current)\n if output_logs:\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired 
velocity of current node is(deg): ',\n diff_deiredVelocity)\n if self.planningProblem.goal.is_reached_only_pos(node_current\n .list_paths[-1][-1]):\n cost = (self.time_desired.start - node_current.\n list_paths[-1][-1].time_step) * 0.01\n cost = cost + diff_deiredVelocity + velocity * 0.01\n return cost\n cost = (distance / velocity + 2 * diff_deiredVelocity + \n velocity * 0.01)\n return cost\n\n def cost_for_modeC_problem(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_euclidean_distance(current_node=\n node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_orient = (self.orientation_desired.end + self.\n orientation_desired.start) / 2\n diff_desiredOrient = self.calc_orientation_diff(desired_orient,\n path_last[-1].orientation)\n angle_intervall = abs(abs(self.orientation_desired.start) -\n abs(self.orientation_desired.end))\n arry = node_current.list_paths[-1][-1].position\n a = np.array([arry[0], arry[1]])\n if self.routeplannerresult is not None:\n distance_to_refrence = self.calc_distance_to_nearest_point(\n self.routeplannerresult.reference_path, a)\n else:\n distance_to_refrence = 0\n if output_logs:\n print('distance to reference path: ', distance_to_refrence)\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', 
angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired orient of current node is(deg): ',\n diff_desiredOrient)\n if distance <= 0.1 and node_current.list_paths[-1][-1\n ].time_step < self.time_desired.start:\n return self.time_desired.start - node_current.list_paths[-1\n ][-1].time_step\n if self.planningProblem.goal.is_reached_only_pos(node_current\n .list_paths[-1][-1]):\n cost = (self.time_desired.start - node_current.\n list_paths[-1][-1].time_step) * 0.01\n cost = cost + diff_desiredOrient + velocity * 0.01\n return cost\n cost = (distance / velocity + 2 * diff_desiredOrient + \n velocity * 0.01)\n return cost\n\n def cost_for_modeD_problem(self, node_current, output_logs):\n totaltogoal = self.calc_distance_to_goal_from_point(node_current.\n list_paths[-1][-1])\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n if self.planningProblem.goal.is_reached_only_pos(node_current.\n list_paths[-1][-1]):\n return (self.time_desired.start - node_current.list_paths[-\n 1][-1].time_step) * 0.01\n velocity = node_current.list_paths[-1][-1].velocity\n if np.isclose(velocity, 0):\n return np.inf\n cost = totaltogoal / node_current.list_paths[-1][-1].velocity\n return cost\n <mask token>\n\n def calc_distance_to_ref_from_point(self, state):\n currentpos = state.position\n distances = []\n for p in self.refPathParsedPnts:\n distances.append(self.euclidean_distance(currentpos, p))\n smallest_points = nsmallest(2, distances)\n index1 = distances.index(smallest_points[0])\n index2 = distances.index(smallest_points[1])\n p1 = self.refPathParsedPnts[index1]\n p2 = self.refPathParsedPnts[index2]\n distance_to_refrence = np.abs(np.cross(p2 - p1, currentpos - p1) /\n np.linalg.norm(p2 - p1))\n return distance_to_refrence\n\n def 
calc_distance_to_goal_from_point(self, state):\n currentpos = state.position\n distances = []\n for p in self.refPathParsedPnts:\n distances.append(self.euclidean_distance(currentpos, p))\n index_smallest_dist = distances.index(min(distances))\n totaltogoal = 0\n for p in range(index_smallest_dist, len(self.refPathParsedPnts) - 1):\n totaltogoal = totaltogoal + self.euclidean_distance(self.\n refPathParsedPnts[p], self.refPathParsedPnts[p + 1])\n return totaltogoal\n\n def get_index_nearest_obst_infront(self, node_current):\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = node_current.list_paths[-1][-1].velocity\n disttoobst = [np.inf] * len(self.list_obstacles)\n for i in range(len(self.list_obstacles)):\n obst = self.list_obstacles[i]\n obstPos = obst.state_at_time(currenttimestep)\n if currentorient is not None and obstPos is not None:\n dist = self.euclidean_distance(currentpos, obstPos.position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if dist <= lookaheadVar:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(\n vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient,\n vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n disttoobst[i] = dist\n else:\n disttoobst[i] = np.inf\n else:\n disttoobst[i] = np.inf\n index_smallest_dist = disttoobst.index(min(disttoobst))\n if disttoobst[index_smallest_dist] == np.inf:\n index_smallest_dist = -1\n return index_smallest_dist\n <mask token>\n\n def cost_for_modeA_problem_old(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = 
node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_euclidean_distance(current_node=\n node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_orient = (self.orientation_desired.end + self.\n orientation_desired.start) / 2\n desired_velocity = (self.velocity_desired.start + self.\n velocity_desired.end) / 2\n diff_desiredOrient = self.calc_orientation_diff(desired_orient,\n path_last[-1].orientation)\n diff_deiredVelocity = abs(velocity - desired_velocity)\n angle_intervall = abs(abs(self.orientation_desired.start) -\n abs(self.orientation_desired.end))\n if output_logs:\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired orient of current node is(deg): ',\n diff_desiredOrient)\n print('diff desired velocity of current node is(deg): ',\n diff_deiredVelocity)\n if distance <= 1:\n desired_vel_weight = 1\n desired_orient_weight = 1\n cost = desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = (cost + desired_orient_weight *\n diff_desiredOrient)\n return cost\n if distance <= 0.1 and node_current.list_paths[-1][-1\n ].time_step < self.time_desired.start:\n return (self.time_desired.start - node_current.\n list_paths[-1][-1].time_step) * 0.001\n if orientationToGoalDiffdegree > 45:\n if distance >= 10:\n velocity_weight = 1\n cost = distance / velocity\n return cost\n if distance < 10 and distance >= 5:\n return np.inf\n if distance < 5:\n return np.inf\n 
else:\n if distance >= 10:\n velocity_weight = 1\n cost = distance / velocity * velocity_weight\n return cost\n if distance < 10 and distance >= 5:\n velocity_weight = 0.5\n desired_vel_weight = 1\n desired_orient_weight = 1\n cost = distance / velocity\n cost = cost + desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = (cost + desired_orient_weight *\n diff_desiredOrient)\n return cost\n if distance < 5:\n cost = distance / velocity\n desired_vel_weight = 3\n desired_orient_weight = 3\n cost = cost + desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = (cost + desired_orient_weight *\n diff_desiredOrient)\n return cost\n",
"step-3": "<mask token>\n\n\nclass StudentMotionPlanner(GreedyBestFirstSearch):\n <mask token>\n\n def __init__(self, scenario, planningProblem, automata, plot_config=\n DefaultPlotConfig):\n super().__init__(scenario=scenario, planningProblem=planningProblem,\n automaton=automata, plot_config=plot_config)\n\n def evaluation_function(self, node_current: PriorityNode) ->float:\n \"\"\"\n Evaluation function of GBFS is f(n) = h(n)\n \"\"\"\n node_current.priority = self.heuristic_function(node_current=\n node_current)\n return node_current.priority\n <mask token>\n\n def cost_for_modeA_problem(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_euclidean_distance(current_node=\n node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_orient = (self.orientation_desired.end + self.\n orientation_desired.start) / 2\n desired_velocity = (self.velocity_desired.start + self.\n velocity_desired.end) / 2\n diff_desiredOrient = abs(self.calc_orientation_diff(\n desired_orient, path_last[-1].orientation))\n diff_deiredVelocity = abs(velocity - desired_velocity)\n angle_intervall = abs(abs(self.orientation_desired.start) -\n abs(self.orientation_desired.end))\n if output_logs:\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', 
angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired orient of current node is(deg): ',\n diff_desiredOrient)\n print('diff desired velocity of current node is(deg): ',\n diff_deiredVelocity)\n current_orient = path_last[-1].orientation\n if distance <= 10:\n if (current_orient < self.orientation_desired.start or \n current_orient > self.orientation_desired.end):\n return np.inf\n if (velocity < self.velocity_desired.start or velocity >\n self.velocity_desired.end):\n return np.inf\n weight = 10\n cost = (distance / velocity + weight * diff_deiredVelocity +\n weight * diff_desiredOrient)\n return cost\n\n def cost_for_modeB_problem(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_distance_to_goal_from_point(node_current\n .list_paths[-1][-1])\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_velocity = (self.velocity_desired.start + self.\n velocity_desired.end) / 2\n diff_deiredVelocity = abs(velocity - desired_velocity)\n self.test_if_in_goal_lanelet(node_current)\n if output_logs:\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired 
velocity of current node is(deg): ',\n diff_deiredVelocity)\n if self.planningProblem.goal.is_reached_only_pos(node_current\n .list_paths[-1][-1]):\n cost = (self.time_desired.start - node_current.\n list_paths[-1][-1].time_step) * 0.01\n cost = cost + diff_deiredVelocity + velocity * 0.01\n return cost\n cost = (distance / velocity + 2 * diff_deiredVelocity + \n velocity * 0.01)\n return cost\n\n def cost_for_modeC_problem(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_euclidean_distance(current_node=\n node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_orient = (self.orientation_desired.end + self.\n orientation_desired.start) / 2\n diff_desiredOrient = self.calc_orientation_diff(desired_orient,\n path_last[-1].orientation)\n angle_intervall = abs(abs(self.orientation_desired.start) -\n abs(self.orientation_desired.end))\n arry = node_current.list_paths[-1][-1].position\n a = np.array([arry[0], arry[1]])\n if self.routeplannerresult is not None:\n distance_to_refrence = self.calc_distance_to_nearest_point(\n self.routeplannerresult.reference_path, a)\n else:\n distance_to_refrence = 0\n if output_logs:\n print('distance to reference path: ', distance_to_refrence)\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', 
angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired orient of current node is(deg): ',\n diff_desiredOrient)\n if distance <= 0.1 and node_current.list_paths[-1][-1\n ].time_step < self.time_desired.start:\n return self.time_desired.start - node_current.list_paths[-1\n ][-1].time_step\n if self.planningProblem.goal.is_reached_only_pos(node_current\n .list_paths[-1][-1]):\n cost = (self.time_desired.start - node_current.\n list_paths[-1][-1].time_step) * 0.01\n cost = cost + diff_desiredOrient + velocity * 0.01\n return cost\n cost = (distance / velocity + 2 * diff_desiredOrient + \n velocity * 0.01)\n return cost\n\n def cost_for_modeD_problem(self, node_current, output_logs):\n totaltogoal = self.calc_distance_to_goal_from_point(node_current.\n list_paths[-1][-1])\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n if self.planningProblem.goal.is_reached_only_pos(node_current.\n list_paths[-1][-1]):\n return (self.time_desired.start - node_current.list_paths[-\n 1][-1].time_step) * 0.01\n velocity = node_current.list_paths[-1][-1].velocity\n if np.isclose(velocity, 0):\n return np.inf\n cost = totaltogoal / node_current.list_paths[-1][-1].velocity\n return cost\n\n def cost_for_Survival_problem(self, node_current, output_logs):\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = node_current.list_paths[-1][-1].velocity\n for obst in self.list_obstacles:\n obstPos = obst.state_at_time(currenttimestep)\n if currentorient is not None and obstPos is not None:\n disttoobst = self.euclidean_distance(currentpos, obstPos.\n position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if disttoobst <= 
lookaheadVar:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(\n vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient,\n vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n if not 'velocity' in obstPos.attributes:\n continue\n if (node_current.list_paths[-1][-1].velocity >\n obstPos.velocity and obstPos.velocity != 0):\n return np.inf\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n\n def calc_distance_to_ref_from_point(self, state):\n currentpos = state.position\n distances = []\n for p in self.refPathParsedPnts:\n distances.append(self.euclidean_distance(currentpos, p))\n smallest_points = nsmallest(2, distances)\n index1 = distances.index(smallest_points[0])\n index2 = distances.index(smallest_points[1])\n p1 = self.refPathParsedPnts[index1]\n p2 = self.refPathParsedPnts[index2]\n distance_to_refrence = np.abs(np.cross(p2 - p1, currentpos - p1) /\n np.linalg.norm(p2 - p1))\n return distance_to_refrence\n\n def calc_distance_to_goal_from_point(self, state):\n currentpos = state.position\n distances = []\n for p in self.refPathParsedPnts:\n distances.append(self.euclidean_distance(currentpos, p))\n index_smallest_dist = distances.index(min(distances))\n totaltogoal = 0\n for p in range(index_smallest_dist, len(self.refPathParsedPnts) - 1):\n totaltogoal = totaltogoal + self.euclidean_distance(self.\n refPathParsedPnts[p], self.refPathParsedPnts[p + 1])\n return totaltogoal\n\n def get_index_nearest_obst_infront(self, node_current):\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = node_current.list_paths[-1][-1].velocity\n disttoobst = [np.inf] * len(self.list_obstacles)\n for i in range(len(self.list_obstacles)):\n obst = self.list_obstacles[i]\n obstPos = obst.state_at_time(currenttimestep)\n if 
currentorient is not None and obstPos is not None:\n dist = self.euclidean_distance(currentpos, obstPos.position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if dist <= lookaheadVar:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(\n vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient,\n vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n disttoobst[i] = dist\n else:\n disttoobst[i] = np.inf\n else:\n disttoobst[i] = np.inf\n index_smallest_dist = disttoobst.index(min(disttoobst))\n if disttoobst[index_smallest_dist] == np.inf:\n index_smallest_dist = -1\n return index_smallest_dist\n <mask token>\n\n def cost_for_modeA_problem_old(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_euclidean_distance(current_node=\n node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_orient = (self.orientation_desired.end + self.\n orientation_desired.start) / 2\n desired_velocity = (self.velocity_desired.start + self.\n velocity_desired.end) / 2\n diff_desiredOrient = self.calc_orientation_diff(desired_orient,\n path_last[-1].orientation)\n diff_deiredVelocity = abs(velocity - desired_velocity)\n angle_intervall = abs(abs(self.orientation_desired.start) -\n abs(self.orientation_desired.end))\n if output_logs:\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation 
of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired orient of current node is(deg): ',\n diff_desiredOrient)\n print('diff desired velocity of current node is(deg): ',\n diff_deiredVelocity)\n if distance <= 1:\n desired_vel_weight = 1\n desired_orient_weight = 1\n cost = desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = (cost + desired_orient_weight *\n diff_desiredOrient)\n return cost\n if distance <= 0.1 and node_current.list_paths[-1][-1\n ].time_step < self.time_desired.start:\n return (self.time_desired.start - node_current.\n list_paths[-1][-1].time_step) * 0.001\n if orientationToGoalDiffdegree > 45:\n if distance >= 10:\n velocity_weight = 1\n cost = distance / velocity\n return cost\n if distance < 10 and distance >= 5:\n return np.inf\n if distance < 5:\n return np.inf\n else:\n if distance >= 10:\n velocity_weight = 1\n cost = distance / velocity * velocity_weight\n return cost\n if distance < 10 and distance >= 5:\n velocity_weight = 0.5\n desired_vel_weight = 1\n desired_orient_weight = 1\n cost = distance / velocity\n cost = cost + desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = (cost + desired_orient_weight *\n diff_desiredOrient)\n return cost\n if distance < 5:\n cost = distance / velocity\n desired_vel_weight = 3\n desired_orient_weight = 3\n cost = cost + desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = (cost + desired_orient_weight *\n diff_desiredOrient)\n return cost\n",
"step-4": "<mask token>\n\n\nclass StudentMotionPlanner(GreedyBestFirstSearch):\n <mask token>\n\n def __init__(self, scenario, planningProblem, automata, plot_config=\n DefaultPlotConfig):\n super().__init__(scenario=scenario, planningProblem=planningProblem,\n automaton=automata, plot_config=plot_config)\n\n def evaluation_function(self, node_current: PriorityNode) ->float:\n \"\"\"\n Evaluation function of GBFS is f(n) = h(n)\n \"\"\"\n node_current.priority = self.heuristic_function(node_current=\n node_current)\n return node_current.priority\n\n def heuristic_function(self, node_current: PriorityNode) ->float:\n \"\"\"\n Function that evaluates the heuristic cost h(n) in student class.\n Created by Mohamed A. Abdellaoui 10.01.2021\n \n \"\"\"\n output_logs = False\n if output_logs:\n print('##################')\n print('current time step: ', node_current.list_paths[-1][-1].\n time_step)\n print('current problem mode', self.planningProblemType)\n print('depth tree: ', node_current.depth_tree)\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = node_current.list_paths[-1][-1].velocity\n if self.reached_goal(node_current.list_paths[-1]):\n return 0.0\n if self.routeplannerresult is None:\n return np.inf\n if not self.disableObstAvoidance:\n for obst in self.list_obstacles:\n obstPos = obst.state_at_time(currenttimestep)\n if currentorient is not None and obstPos is not None:\n disttoobst = self.euclidean_distance(currentpos,\n obstPos.position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if disttoobst <= lookaheadVar:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(\n vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient,\n vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n if not 'velocity' in obstPos.attributes:\n continue\n if 
(node_current.list_paths[-1][-1].velocity >\n obstPos.velocity and obstPos.velocity != 0):\n return np.inf\n index_smallest_dist = self.get_index_nearest_obst_infront(\n node_current)\n if index_smallest_dist != -1:\n obst = self.list_obstacles[index_smallest_dist]\n obstPos = obst.state_at_time(currenttimestep)\n if obstPos is not None and 'velocity' in obstPos.attributes:\n if obstPos.velocity == 0:\n cost = node_current.list_paths[-1][-1].velocity\n return cost\n if node_current.list_paths[-1][-1\n ].velocity > obstPos.velocity:\n return np.inf\n cost = abs(node_current.list_paths[-1][-1].velocity -\n obstPos.velocity)\n return cost\n if self.planningProblemType == 'ModeA':\n cost = self.cost_for_modeA_problem(node_current, output_logs)\n if output_logs:\n print('Cost from modeA cost func: ', cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeB':\n cost = self.cost_for_modeB_problem(node_current, output_logs)\n if output_logs:\n print('Cost from modeB cost func: ', cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeC':\n cost = self.cost_for_modeC_problem(node_current, output_logs)\n if output_logs:\n print('Cost from modeB cost func: ', cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeD':\n cost = self.cost_for_modeD_problem(node_current, output_logs)\n if output_logs:\n print('Cost from modeB cost func: ', cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'Survival':\n cost = self.cost_for_Survival_problem(node_current, output_logs)\n if output_logs:\n print('Cost from modeB cost func: ', cost)\n if cost < 0:\n return 0\n return cost\n\n def cost_for_modeA_problem(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = 
node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_euclidean_distance(current_node=\n node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_orient = (self.orientation_desired.end + self.\n orientation_desired.start) / 2\n desired_velocity = (self.velocity_desired.start + self.\n velocity_desired.end) / 2\n diff_desiredOrient = abs(self.calc_orientation_diff(\n desired_orient, path_last[-1].orientation))\n diff_deiredVelocity = abs(velocity - desired_velocity)\n angle_intervall = abs(abs(self.orientation_desired.start) -\n abs(self.orientation_desired.end))\n if output_logs:\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired orient of current node is(deg): ',\n diff_desiredOrient)\n print('diff desired velocity of current node is(deg): ',\n diff_deiredVelocity)\n current_orient = path_last[-1].orientation\n if distance <= 10:\n if (current_orient < self.orientation_desired.start or \n current_orient > self.orientation_desired.end):\n return np.inf\n if (velocity < self.velocity_desired.start or velocity >\n self.velocity_desired.end):\n return np.inf\n weight = 10\n cost = (distance / velocity + weight * diff_deiredVelocity +\n weight * diff_desiredOrient)\n return cost\n\n def cost_for_modeB_problem(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is 
None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_distance_to_goal_from_point(node_current\n .list_paths[-1][-1])\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_velocity = (self.velocity_desired.start + self.\n velocity_desired.end) / 2\n diff_deiredVelocity = abs(velocity - desired_velocity)\n self.test_if_in_goal_lanelet(node_current)\n if output_logs:\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired velocity of current node is(deg): ',\n diff_deiredVelocity)\n if self.planningProblem.goal.is_reached_only_pos(node_current\n .list_paths[-1][-1]):\n cost = (self.time_desired.start - node_current.\n list_paths[-1][-1].time_step) * 0.01\n cost = cost + diff_deiredVelocity + velocity * 0.01\n return cost\n cost = (distance / velocity + 2 * diff_deiredVelocity + \n velocity * 0.01)\n return cost\n\n def cost_for_modeC_problem(self, node_current, output_logs):\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = 
self.calc_euclidean_distance(current_node=\n node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_orient = (self.orientation_desired.end + self.\n orientation_desired.start) / 2\n diff_desiredOrient = self.calc_orientation_diff(desired_orient,\n path_last[-1].orientation)\n angle_intervall = abs(abs(self.orientation_desired.start) -\n abs(self.orientation_desired.end))\n arry = node_current.list_paths[-1][-1].position\n a = np.array([arry[0], arry[1]])\n if self.routeplannerresult is not None:\n distance_to_refrence = self.calc_distance_to_nearest_point(\n self.routeplannerresult.reference_path, a)\n else:\n distance_to_refrence = 0\n if output_logs:\n print('distance to reference path: ', distance_to_refrence)\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired orient of current node is(deg): ',\n diff_desiredOrient)\n if distance <= 0.1 and node_current.list_paths[-1][-1\n ].time_step < self.time_desired.start:\n return self.time_desired.start - node_current.list_paths[-1\n ][-1].time_step\n if self.planningProblem.goal.is_reached_only_pos(node_current\n .list_paths[-1][-1]):\n cost = (self.time_desired.start - node_current.\n list_paths[-1][-1].time_step) * 0.01\n cost = cost + diff_desiredOrient + velocity * 0.01\n return cost\n cost = (distance / velocity + 2 * diff_desiredOrient + \n velocity * 0.01)\n return cost\n\n def cost_for_modeD_problem(self, node_current, output_logs):\n totaltogoal = self.calc_distance_to_goal_from_point(node_current.\n 
list_paths[-1][-1])\n if self.position_desired is None:\n if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n if self.planningProblem.goal.is_reached_only_pos(node_current.\n list_paths[-1][-1]):\n return (self.time_desired.start - node_current.list_paths[-\n 1][-1].time_step) * 0.01\n velocity = node_current.list_paths[-1][-1].velocity\n if np.isclose(velocity, 0):\n return np.inf\n cost = totaltogoal / node_current.list_paths[-1][-1].velocity\n return cost\n\n def cost_for_Survival_problem(self, node_current, output_logs):\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = node_current.list_paths[-1][-1].velocity\n for obst in self.list_obstacles:\n obstPos = obst.state_at_time(currenttimestep)\n if currentorient is not None and obstPos is not None:\n disttoobst = self.euclidean_distance(currentpos, obstPos.\n position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if disttoobst <= lookaheadVar:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(\n vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient,\n vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n if not 'velocity' in obstPos.attributes:\n continue\n if (node_current.list_paths[-1][-1].velocity >\n obstPos.velocity and obstPos.velocity != 0):\n return np.inf\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n\n def calc_distance_to_ref_from_point(self, state):\n currentpos = state.position\n distances = []\n for p in self.refPathParsedPnts:\n distances.append(self.euclidean_distance(currentpos, p))\n smallest_points = nsmallest(2, distances)\n index1 = distances.index(smallest_points[0])\n index2 = 
distances.index(smallest_points[1])\n p1 = self.refPathParsedPnts[index1]\n p2 = self.refPathParsedPnts[index2]\n distance_to_refrence = np.abs(np.cross(p2 - p1, currentpos - p1) /\n np.linalg.norm(p2 - p1))\n return distance_to_refrence\n\n def calc_distance_to_goal_from_point(self, state):\n currentpos = state.position\n distances = []\n for p in self.refPathParsedPnts:\n distances.append(self.euclidean_distance(currentpos, p))\n index_smallest_dist = distances.index(min(distances))\n totaltogoal = 0\n for p in range(index_smallest_dist, len(self.refPathParsedPnts) - 1):\n totaltogoal = totaltogoal + self.euclidean_distance(self.\n refPathParsedPnts[p], self.refPathParsedPnts[p + 1])\n return totaltogoal\n\n def get_index_nearest_obst_infront(self, node_current):\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = node_current.list_paths[-1][-1].velocity\n disttoobst = [np.inf] * len(self.list_obstacles)\n for i in range(len(self.list_obstacles)):\n obst = self.list_obstacles[i]\n obstPos = obst.state_at_time(currenttimestep)\n if currentorient is not None and obstPos is not None:\n dist = self.euclidean_distance(currentpos, obstPos.position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if dist <= lookaheadVar:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(\n vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient,\n vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n disttoobst[i] = dist\n else:\n disttoobst[i] = np.inf\n else:\n disttoobst[i] = np.inf\n index_smallest_dist = disttoobst.index(min(disttoobst))\n if disttoobst[index_smallest_dist] == np.inf:\n index_smallest_dist = -1\n return index_smallest_dist\n <mask token>\n\n def cost_for_modeA_problem_old(self, node_current, output_logs):\n if self.position_desired is None:\n 
if output_logs:\n print('exit Cost function because position desired is None!')\n return self.time_desired.start - node_current.list_paths[-1][-1\n ].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n distance = self.calc_euclidean_distance(current_node=\n node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal,\n path_last[-1].orientation)\n orientationToGoalDiffdegree = abs(orientationToGoalDiff\n ) * 180 / 3.14\n desired_orient = (self.orientation_desired.end + self.\n orientation_desired.start) / 2\n desired_velocity = (self.velocity_desired.start + self.\n velocity_desired.end) / 2\n diff_desiredOrient = self.calc_orientation_diff(desired_orient,\n path_last[-1].orientation)\n diff_deiredVelocity = abs(velocity - desired_velocity)\n angle_intervall = abs(abs(self.orientation_desired.start) -\n abs(self.orientation_desired.end))\n if output_logs:\n print('Distance to goal of current node is: ', distance)\n print('Velocity of current node is: ', velocity)\n print('Orientation of current position: ', node_current\n .list_paths[-1][-1].orientation)\n print('Angle to goal of current node is: ', angleToGoal)\n print('orientation diff to goal of current node is(deg): ',\n orientationToGoalDiffdegree)\n print('diff desired orient of current node is(deg): ',\n diff_desiredOrient)\n print('diff desired velocity of current node is(deg): ',\n diff_deiredVelocity)\n if distance <= 1:\n desired_vel_weight = 1\n desired_orient_weight = 1\n cost = desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = (cost + desired_orient_weight *\n diff_desiredOrient)\n return cost\n if distance <= 0.1 and node_current.list_paths[-1][-1\n ].time_step < self.time_desired.start:\n return (self.time_desired.start - node_current.\n 
list_paths[-1][-1].time_step) * 0.001\n if orientationToGoalDiffdegree > 45:\n if distance >= 10:\n velocity_weight = 1\n cost = distance / velocity\n return cost\n if distance < 10 and distance >= 5:\n return np.inf\n if distance < 5:\n return np.inf\n else:\n if distance >= 10:\n velocity_weight = 1\n cost = distance / velocity * velocity_weight\n return cost\n if distance < 10 and distance >= 5:\n velocity_weight = 0.5\n desired_vel_weight = 1\n desired_orient_weight = 1\n cost = distance / velocity\n cost = cost + desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = (cost + desired_orient_weight *\n diff_desiredOrient)\n return cost\n if distance < 5:\n cost = distance / velocity\n desired_vel_weight = 3\n desired_orient_weight = 3\n cost = cost + desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = (cost + desired_orient_weight *\n diff_desiredOrient)\n return cost\n",
"step-5": "from SMP.motion_planner.node import PriorityNode\nimport numpy as np\nfrom heapq import nsmallest\nimport sys\nfrom SMP.motion_planner.plot_config import DefaultPlotConfig\nfrom SMP.motion_planner.search_algorithms.best_first_search import GreedyBestFirstSearch\n# imports for route planner:\n\nclass StudentMotionPlanner(GreedyBestFirstSearch):\n \"\"\"\n Motion planner implementation by students.\n Note that you may inherit from any given motion planner as you wish, or come up with your own planner.\n Here as an example, the planner is inherited from the GreedyBestFirstSearch planner.\n \"\"\"\n\n def __init__(self, scenario, planningProblem, automata, plot_config=DefaultPlotConfig):\n super().__init__(scenario=scenario, planningProblem=planningProblem, automaton=automata,\n plot_config=plot_config)\n\n def evaluation_function(self, node_current: PriorityNode) -> float:\n ########################################################################\n # todo: Implement your own evaluation function here. #\n ########################################################################\n # Copied from greedy best first search:\n \"\"\"\n Evaluation function of GBFS is f(n) = h(n)\n \"\"\"\n\n node_current.priority = self.heuristic_function(node_current=node_current)\n return node_current.priority\n\n\n def heuristic_function(self, node_current: PriorityNode) -> float:\n ########################################################################\n # todo: Implement your own heuristic cost calculation here. #\n # Hint: #\n # Use the State of the current node and the information from the #\n # planning problem, as well as from the scenario. #\n # Some helper functions for your convenience can be found in #\n # ./search_algorithms/base_class.py #\n ########################################################################\n \"\"\"\n Function that evaluates the heuristic cost h(n) in student class.\n Created by Mohamed A. 
Abdellaoui 10.01.2021\n \n \"\"\"\n output_logs = False\n if output_logs:\n print(\"##################\")\n print(\"current time step: \", node_current.list_paths[-1][-1].time_step)\n print(\"current problem mode\", self.planningProblemType)\n print(\"depth tree: \", node_current.depth_tree)\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = node_current.list_paths[-1][-1].velocity\n\n # Test if reached goal:\n if self.reached_goal(node_current.list_paths[-1]):\n return 0.0\n # Test if route planner failed to find a path: \n if self.routeplannerresult is None:\n return np.inf\n\n ############ Detect cars in front:\n # calc cost based on distance to gool following the refrence path:\n # loop through all obstacles at time step x and find if any is close of current pos:\n if not self.disableObstAvoidance:\n for obst in self.list_obstacles:\n obstPos = obst.state_at_time(currenttimestep)\n if currentorient is not None and obstPos is not None:\n disttoobst = self.euclidean_distance(currentpos, obstPos.position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if disttoobst <= lookaheadVar:\n # calc orientation diff between car and obstacle:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient, vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n if not 'velocity' in obstPos.attributes:\n continue\n if node_current.list_paths[-1][-1].velocity > obstPos.velocity and obstPos.velocity != 0:\n return np.inf\n \n # get index of closest object to the ego vehicle:\n index_smallest_dist = self.get_index_nearest_obst_infront(node_current)\n \n # use the index to locate vehicle to calc cost: \n if index_smallest_dist != -1:\n # found the index of vehicle with smallest distance to ego car:\n obst = 
self.list_obstacles[index_smallest_dist]\n obstPos = obst.state_at_time(currenttimestep)\n if obstPos is not None and 'velocity' in obstPos.attributes:\n if obstPos.velocity == 0:\n cost = node_current.list_paths[-1][-1].velocity\n return cost\n if node_current.list_paths[-1][-1].velocity > obstPos.velocity:\n return np.inf\n cost = abs(node_current.list_paths[-1][-1].velocity - obstPos.velocity)\n return cost\n #########################################################\n\n # Decide based on planning problem type how to calculate cost\n if self.planningProblemType == 'ModeA':\n # Call function for planning problem with desired time, position, speed and orientation\n cost = self.cost_for_modeA_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeA cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeB':\n # Call function for planning problem with desired time, position and velocity:\n cost = self.cost_for_modeB_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeC':\n # Call function for planning problem with desired time, position and orientation:\n cost = self.cost_for_modeC_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'ModeD':\n # Call function for planning problem with desired time and position:\n cost = self.cost_for_modeD_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n elif self.planningProblemType == 'Survival':\n # Call function for planning problem with desired time:\n cost = self.cost_for_Survival_problem(node_current, output_logs)\n if output_logs:\n print(\"Cost from modeB cost func: \", cost)\n if cost < 0:\n return 0\n return cost\n\n 
def cost_for_modeA_problem(self, node_current, output_logs):\n # Function for planning problem with desired time, position, speed and orientation\n if self.position_desired is None:\n if output_logs:\n print(\"exit Cost function because position desired is None!\")\n return self.time_desired.start - node_current.list_paths[-1][-1].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n # Calc Variables:\n distance = self.calc_euclidean_distance(current_node=node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal, path_last[-1].orientation)\n orientationToGoalDiffdegree = (abs(orientationToGoalDiff) * 180) / 3.14\n desired_orient = (self.orientation_desired.end + self.orientation_desired.start) / 2\n desired_velocity = (self.velocity_desired.start + self.velocity_desired.end) / 2\n diff_desiredOrient = abs(self.calc_orientation_diff(desired_orient, path_last[-1].orientation))\n diff_deiredVelocity = abs(velocity - desired_velocity)\n angle_intervall = abs(abs(self.orientation_desired.start) - abs(self.orientation_desired.end))\n\n # Output data for debugging:\n if output_logs:\n print(\"Distance to goal of current node is: \", distance)\n print(\"Velocity of current node is: \", velocity)\n print(\"Orientation of current position: \", node_current.list_paths[-1][-1].orientation)\n print(\"Angle to goal of current node is: \", angleToGoal)\n print(\"orientation diff to goal of current node is(deg): \", orientationToGoalDiffdegree)\n print(\"diff desired orient of current node is(deg): \", diff_desiredOrient)\n print(\"diff desired velocity of current node is(deg): \", diff_deiredVelocity)\n # test 16.01:\n current_orient = path_last[-1].orientation\n if distance <= 10:\n if current_orient < self.orientation_desired.start or current_orient > self.orientation_desired.end:\n return 
np.inf\n if velocity < self.velocity_desired.start or velocity > self.velocity_desired.end:\n return np.inf\n\n weight = 10\n # if very colse to goal, minimize the diff velocity and diff orient\n cost = (distance / velocity) + weight* diff_deiredVelocity + weight* diff_desiredOrient\n #cost = distance + diff_desiredOrient + diff_deiredVelocity\n return cost\n\n def cost_for_modeB_problem(self, node_current, output_logs):\n # Function for planning problem with desired time, position, speed\n if self.position_desired is None:\n if output_logs:\n print(\"exit Cost function because position desired is None!\")\n return self.time_desired.start - node_current.list_paths[-1][-1].time_step\n else:\n\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n # Calc Variables:\n distance = self.calc_distance_to_goal_from_point(node_current.list_paths[-1][-1])\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal, path_last[-1].orientation)\n orientationToGoalDiffdegree = (abs(orientationToGoalDiff)*180)/3.14\n desired_velocity = (self.velocity_desired.start + self.velocity_desired.end)/2\n diff_deiredVelocity = abs(velocity - desired_velocity)\n self.test_if_in_goal_lanelet(node_current)\n # Output data for debugging:\n if output_logs:\n print(\"Distance to goal of current node is: \", distance)\n print(\"Velocity of current node is: \", velocity)\n print(\"Orientation of current position: \",node_current.list_paths[-1][-1].orientation)\n print(\"Angle to goal of current node is: \", angleToGoal)\n print(\"orientation diff to goal of current node is(deg): \", orientationToGoalDiffdegree)\n print(\"diff desired velocity of current node is(deg): \", diff_deiredVelocity)\n\n # If very close to target but time is still not reached:\n #if distance <= 0.1 and node_current.list_paths[-1][-1].time_step < 
self.time_desired.start:\n # return self.time_desired.start - node_current.list_paths[-1][-1].time_step * 0.01\n if self.planningProblem.goal.is_reached_only_pos(node_current.list_paths[-1][-1]):\n cost = (self.time_desired.start - node_current.list_paths[-1][-1].time_step) * 0.01\n cost = cost + diff_deiredVelocity + velocity *0.01\n return cost\n\n cost = ( distance / velocity ) + 2 * diff_deiredVelocity + velocity*0.01\n return cost\n\n def cost_for_modeC_problem(self, node_current, output_logs):\n # Function for planning problem with desired time, position, speed and orientation\n if self.position_desired is None:\n if output_logs:\n print(\"exit Cost function because position desired is None!\")\n return self.time_desired.start - node_current.list_paths[-1][-1].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n # Calc Variables:\n distance = self.calc_euclidean_distance(current_node=node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal, path_last[-1].orientation)\n orientationToGoalDiffdegree = (abs(orientationToGoalDiff)*180)/3.14\n desired_orient = (self.orientation_desired.end + self.orientation_desired.start) / 2\n diff_desiredOrient = self.calc_orientation_diff(desired_orient, path_last[-1].orientation)\n angle_intervall = abs(abs(self.orientation_desired.start) - abs(self.orientation_desired.end))\n\n # Calcualte distance between currrent position and reference path:\n arry = node_current.list_paths[-1][-1].position\n a = np.array([arry[0], arry[1]])\n if self.routeplannerresult is not None:\n distance_to_refrence = self.calc_distance_to_nearest_point(self.routeplannerresult.reference_path,\n a)\n else:\n distance_to_refrence = 0\n\n # Output data for debugging:\n if output_logs:\n print(\"distance to reference path: \", distance_to_refrence)\n 
print(\"Distance to goal of current node is: \", distance)\n print(\"Velocity of current node is: \", velocity)\n print(\"Orientation of current position: \",node_current.list_paths[-1][-1].orientation)\n print(\"Angle to goal of current node is: \", angleToGoal)\n print(\"orientation diff to goal of current node is(deg): \", orientationToGoalDiffdegree)\n print(\"diff desired orient of current node is(deg): \", diff_desiredOrient)\n\n # If very close to target but time is still not reached:\n if distance <= 0.1 and node_current.list_paths[-1][-1].time_step < self.time_desired.start:\n return self.time_desired.start - node_current.list_paths[-1][-1].time_step\n\n if self.planningProblem.goal.is_reached_only_pos(node_current.list_paths[-1][-1]):\n cost = (self.time_desired.start - node_current.list_paths[-1][-1].time_step) * 0.01\n cost = cost + diff_desiredOrient + velocity *0.01\n return cost\n\n cost = ( distance / velocity ) + 2 * diff_desiredOrient + velocity*0.01\n return cost\n\n def cost_for_modeD_problem(self, node_current, output_logs):\n\n totaltogoal = self.calc_distance_to_goal_from_point(node_current.list_paths[-1][-1])\n if self.position_desired is None:\n if output_logs:\n print(\"exit Cost function because position desired is None!\")\n return self.time_desired.start - node_current.list_paths[-1][-1].time_step\n else:\n if self.planningProblem.goal.is_reached_only_pos(node_current.list_paths[-1][-1]):\n return (self.time_desired.start - node_current.list_paths[-1][-1].time_step) *0.01\n velocity = node_current.list_paths[-1][-1].velocity\n if np.isclose(velocity, 0):\n return np.inf\n cost = totaltogoal / node_current.list_paths[-1][-1].velocity\n return cost\n\n def cost_for_Survival_problem(self, node_current, output_logs):\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = 
node_current.list_paths[-1][-1].velocity\n for obst in self.list_obstacles:\n obstPos = obst.state_at_time(currenttimestep)\n if currentorient is not None and obstPos is not None:\n disttoobst = self.euclidean_distance(currentpos, obstPos.position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if disttoobst <= lookaheadVar:\n # calc orientation diff between car and obstacle:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient, vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n if not 'velocity' in obstPos.attributes:\n continue\n if node_current.list_paths[-1][-1].velocity > obstPos.velocity and obstPos.velocity != 0:\n return np.inf\n return self.time_desired.start - node_current.list_paths[-1][-1].time_step\n\n def calc_distance_to_ref_from_point(self, state):\n #calc distance of points to each point of refrence path:\n currentpos = state.position\n distances = []\n for p in self.refPathParsedPnts:\n distances.append(self.euclidean_distance(currentpos, p))\n smallest_points = nsmallest(2, distances)\n index1 = distances.index(smallest_points[0])\n index2 = distances.index(smallest_points[1])\n p1 = self.refPathParsedPnts[index1]\n p2 = self.refPathParsedPnts[index2]\n distance_to_refrence = np.abs(np.cross(p2 - p1, currentpos - p1) / np.linalg.norm(p2 - p1))\n return distance_to_refrence\n\n def calc_distance_to_goal_from_point(self, state):\n #calc distance of points to each point of refrence path:\n currentpos = state.position\n distances = []\n for p in self.refPathParsedPnts:\n distances.append(self.euclidean_distance(currentpos, p))\n index_smallest_dist = distances.index(min(distances))\n totaltogoal = 0\n for p in range(index_smallest_dist, len(self.refPathParsedPnts) - 1):\n totaltogoal = totaltogoal + self.euclidean_distance(self.refPathParsedPnts[p],self.refPathParsedPnts[p+1])\n\n return totaltogoal\n\n def 
get_index_nearest_obst_infront(self,node_current):\n # loop through all obstacles at time step x and find if any is close of current pos:\n currentorient = node_current.list_paths[-1][-1].orientation\n currentpos = node_current.list_paths[-1][-1].position\n currenttimestep = node_current.list_paths[-1][-1].time_step\n currentVel = node_current.list_paths[-1][-1].velocity\n disttoobst = [np.inf] * len(self.list_obstacles)\n for i in range(len(self.list_obstacles)):\n obst = self.list_obstacles[i]\n obstPos = obst.state_at_time(currenttimestep)\n if currentorient is not None and obstPos is not None:\n dist = self.euclidean_distance(currentpos, obstPos.position)\n lookaheadVar = 1.375 * currentVel + 2.5\n if dist <= lookaheadVar:\n # calc orientation diff between car and obstacle:\n vectorToObst = np.array([currentpos, obstPos.position])\n vectorToObstOrient = self.calc_angle_of_position(vectorToObst, currentpos)\n orientdiff = self.calc_orientation_diff(currentorient, vectorToObstOrient)\n if abs(orientdiff) <= 0.261799:\n disttoobst[i]= dist\n else:\n disttoobst[i]= np.inf\n else:\n disttoobst[i]= np.inf\n index_smallest_dist = disttoobst.index(min(disttoobst))\n if disttoobst[index_smallest_dist] == np.inf:\n index_smallest_dist = -1\n return index_smallest_dist\n\n\n def test_if_in_goal_lanelet(self, node_current):\n pos = [node_current.list_paths[-1][-1].position]\n currentlanelet = self.scenario.lanelet_network.find_lanelet_by_position(pos)\n currentlanelet = currentlanelet[0][0]\n #result = self.is_goal_in_lane(currentlanelet)\n result = False\n if self.planningProblem.goal.lanelets_of_goal_position is not None:\n if currentlanelet in self.planningProblem.goal.lanelets_of_goal_position.get(0):\n result = True\n return result\n\n def cost_for_modeA_problem_old(self, node_current, output_logs):\n # Function for planning problem with desired time, position, speed and orientation\n if self.position_desired is None:\n if output_logs:\n print(\"exit Cost function 
because position desired is None!\")\n return self.time_desired.start - node_current.list_paths[-1][-1].time_step\n else:\n velocity = node_current.list_paths[-1][-1].velocity\n path_last = node_current.list_paths[-1]\n if np.isclose(velocity, 0):\n return np.inf\n else:\n # Calc Variables:\n distance = self.calc_euclidean_distance(current_node=node_current)\n angleToGoal = self.calc_angle_to_goal(path_last[-1])\n orientationToGoalDiff = self.calc_orientation_diff(angleToGoal, path_last[-1].orientation)\n orientationToGoalDiffdegree = (abs(orientationToGoalDiff)*180)/3.14\n desired_orient = (self.orientation_desired.end + self.orientation_desired.start) / 2\n desired_velocity = (self.velocity_desired.start + self.velocity_desired.end)/2\n diff_desiredOrient = self.calc_orientation_diff(desired_orient, path_last[-1].orientation)\n diff_deiredVelocity = abs(velocity - desired_velocity)\n angle_intervall = abs(abs(self.orientation_desired.start) - abs(self.orientation_desired.end))\n\n # Output data for debugging:\n if output_logs:\n print(\"Distance to goal of current node is: \", distance)\n print(\"Velocity of current node is: \", velocity)\n print(\"Orientation of current position: \",node_current.list_paths[-1][-1].orientation)\n print(\"Angle to goal of current node is: \", angleToGoal)\n print(\"orientation diff to goal of current node is(deg): \", orientationToGoalDiffdegree)\n print(\"diff desired orient of current node is(deg): \", diff_desiredOrient)\n print(\"diff desired velocity of current node is(deg): \", diff_deiredVelocity)\n\n # if very colse to goal, minimize the diff velocity and diff orient\n if distance <= 1:\n desired_vel_weight = 1\n desired_orient_weight = 1\n cost = desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = cost + desired_orient_weight * diff_desiredOrient\n return cost\n\n\n # If very close to target but time is still not reached:\n if distance <= 0.1 and 
node_current.list_paths[-1][-1].time_step < self.time_desired.start:\n return (self.time_desired.start - node_current.list_paths[-1][-1].time_step) *0.001\n\n # check if goal in in field of view:\n if orientationToGoalDiffdegree > 45:\n # goal is not in field of view:\n # give more weight to speed and follow reference path blindly:\n # block to differentiate between large distance to goal and small distance:\n if distance >= 10: # too far away from target, just follow the least distance and target lanelet.\n velocity_weight = 1\n cost = distance / velocity\n\n return cost\n\n if distance < 10 and distance >= 5: # almost close, reduce speed.\n return np.inf\n\n if distance < 5: # very close andjust orientation angle..\n return np.inf\n else:\n # goal is in field of view:\n # give more weight to distance and speed and orientation goals:\n # goal is not in field of view:\n # give more weight to speed and follow reference path blindly:\n # block to differentiate between large distance to goal and small distance:\n if distance >= 10: # too far away from target, just follow the least distance and target lanelet.\n velocity_weight = 1\n cost = distance / velocity * velocity_weight\n return cost\n\n if distance < 10 and distance >= 5: # almost close, reduce speed.\n velocity_weight = 0.5\n desired_vel_weight = 1\n desired_orient_weight = 1\n cost = distance / velocity\n cost = cost + desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = cost + desired_orient_weight * diff_desiredOrient\n return cost\n\n if distance < 5: # very close andjust orientation angle..\n cost = distance / velocity\n desired_vel_weight = 3\n desired_orient_weight = 3\n cost = cost + desired_vel_weight * diff_deiredVelocity\n if angle_intervall < 1 and angle_intervall != 0:\n cost = cost + desired_orient_weight * diff_desiredOrient\n return cost\n",
"step-ids": [
9,
11,
12,
13,
17
]
}
|
[
9,
11,
12,
13,
17
] |
str = 'Hello world'
print ("字符串长度 : %d" %(len(str)))
print("字符串的长度 444:",len(str))
print (str)
print (str[0])
print (str[1:5])
print (str[:len(str)])
print (str[1:]*3)
print (str[1:]*5)
print ('字符串拼接')
print ("Hello" + "world")
#print ("python : str.join Test")
str1 = "-"
print (str1.join(str))
list = [1,2,3,4]
for a in str :
print ("当前字母:",a)
n = 0
for s in list :
print ("list[%d] :%d" %(n++,s));
|
normal
|
{
"blob_id": "77b7a0ae115aa063512ea7d6e91811470a4cf9d0",
"index": 2187,
"step-1": "\nstr = 'Hello world'\n\nprint (\"字符串长度 : %d\" %(len(str)))\nprint(\"字符串的长度 444:\",len(str))\nprint (str)\nprint (str[0])\nprint (str[1:5])\nprint (str[:len(str)])\nprint (str[1:]*3)\nprint (str[1:]*5)\n\nprint ('字符串拼接')\n\nprint (\"Hello\" + \"world\")\n\n\n#print (\"python : str.join Test\")\nstr1 = \"-\"\n\nprint (str1.join(str))\n\n\nlist = [1,2,3,4]\n\n\nfor a in str :\n print (\"当前字母:\",a)\n\nn = 0\nfor s in list :\n print (\"list[%d] :%d\" %(n++,s));\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import numpy as np
import torch
from timm.data.transforms_factory import transforms_imagenet_eval
from torchvision import transforms
from PIL import Image
def preprocess(args, src_path, save_path):
if isinstance(args.input_size, tuple):
img_size = args.input_size[-2:]
else:
img_size = args.input_size
preprocesser = transforms_imagenet_eval(
img_size,
interpolation=args.interpolation,
use_prefetcher=args.use_prefetcher,
mean=args.mean,
std=args.std,
crop_pct=args.crop_pct)
i = 0
in_files = os.listdir(src_path)
for file in in_files:
i = i + 1
print(file, "===", i)
input_image = Image.open(src_path + file).convert('RGB')
input_tensor = preprocesser(input_image)
img = np.array(input_tensor).astype(np.float32)
img = (img - np.array([x * 255 for x in args.mean]).reshape(3, 1, 1)) / np.array(
[x * 255 for x in args.std]).reshape(3, 1, 1)
img = img.astype(np.float32)
img.tofile(os.path.join(save_path, file.split('.')[0] + ".bin"))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src_path', default='', type=str)
parser.add_argument('--save_path', default='', type=str)
parser.add_argument('--interpolation', default='bicubic', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('use_prefetcher', action='store_true', default=True,
help='enable fast prefetcher')
parser.add_argument('--crop-pct', default=0.9, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
args = parser.parse_args()
args.mean = (0.485, 0.456, 0.406)
args.std = (0.229, 0.224, 0.225)
args.input_size = (3, 224, 224)
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
preprocess(args, args.src_path, args.save_path)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "443ed24ab396e83dbf12558207376258124bca8b",
"index": 4094,
"step-1": "<mask token>\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str,\n metavar='NAME', help=\n 'Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',\n help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = 0.485, 0.456, 0.406\n args.std = 0.229, 0.224, 0.225\n args.input_size = 3, 224, 224\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n preprocess(args, args.src_path, args.save_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef preprocess(args, src_path, save_path):\n if isinstance(args.input_size, tuple):\n img_size = args.input_size[-2:]\n else:\n img_size = args.input_size\n preprocesser = transforms_imagenet_eval(img_size, interpolation=args.\n interpolation, use_prefetcher=args.use_prefetcher, mean=args.mean,\n std=args.std, crop_pct=args.crop_pct)\n i = 0\n in_files = os.listdir(src_path)\n for file in in_files:\n i = i + 1\n print(file, '===', i)\n input_image = Image.open(src_path + file).convert('RGB')\n input_tensor = preprocesser(input_image)\n img = np.array(input_tensor).astype(np.float32)\n img = (img - np.array([(x * 255) for x in args.mean]).reshape(3, 1, 1)\n ) / np.array([(x * 255) for x in args.std]).reshape(3, 1, 1)\n img = img.astype(np.float32)\n img.tofile(os.path.join(save_path, file.split('.')[0] + '.bin'))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str,\n metavar='NAME', help=\n 'Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',\n help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = 0.485, 0.456, 0.406\n args.std = 0.229, 0.224, 0.225\n args.input_size = 3, 224, 224\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n preprocess(args, args.src_path, args.save_path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef preprocess(args, src_path, save_path):\n if isinstance(args.input_size, tuple):\n img_size = args.input_size[-2:]\n else:\n img_size = args.input_size\n preprocesser = transforms_imagenet_eval(img_size, interpolation=args.\n interpolation, use_prefetcher=args.use_prefetcher, mean=args.mean,\n std=args.std, crop_pct=args.crop_pct)\n i = 0\n in_files = os.listdir(src_path)\n for file in in_files:\n i = i + 1\n print(file, '===', i)\n input_image = Image.open(src_path + file).convert('RGB')\n input_tensor = preprocesser(input_image)\n img = np.array(input_tensor).astype(np.float32)\n img = (img - np.array([(x * 255) for x in args.mean]).reshape(3, 1, 1)\n ) / np.array([(x * 255) for x in args.std]).reshape(3, 1, 1)\n img = img.astype(np.float32)\n img.tofile(os.path.join(save_path, file.split('.')[0] + '.bin'))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str,\n metavar='NAME', help=\n 'Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',\n help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = 0.485, 0.456, 0.406\n args.std = 0.229, 0.224, 0.225\n args.input_size = 3, 224, 224\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n preprocess(args, args.src_path, args.save_path)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import argparse\nimport os\nimport numpy as np\nimport torch\nfrom timm.data.transforms_factory import transforms_imagenet_eval\nfrom torchvision import transforms\nfrom PIL import Image\n\n\ndef preprocess(args, src_path, save_path):\n if isinstance(args.input_size, tuple):\n img_size = args.input_size[-2:]\n else:\n img_size = args.input_size\n preprocesser = transforms_imagenet_eval(img_size, interpolation=args.\n interpolation, use_prefetcher=args.use_prefetcher, mean=args.mean,\n std=args.std, crop_pct=args.crop_pct)\n i = 0\n in_files = os.listdir(src_path)\n for file in in_files:\n i = i + 1\n print(file, '===', i)\n input_image = Image.open(src_path + file).convert('RGB')\n input_tensor = preprocesser(input_image)\n img = np.array(input_tensor).astype(np.float32)\n img = (img - np.array([(x * 255) for x in args.mean]).reshape(3, 1, 1)\n ) / np.array([(x * 255) for x in args.std]).reshape(3, 1, 1)\n img = img.astype(np.float32)\n img.tofile(os.path.join(save_path, file.split('.')[0] + '.bin'))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str,\n metavar='NAME', help=\n 'Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',\n help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = 0.485, 0.456, 0.406\n args.std = 0.229, 0.224, 0.225\n args.input_size = 3, 224, 224\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n preprocess(args, args.src_path, args.save_path)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport numpy as np\n\nimport torch\nfrom timm.data.transforms_factory import transforms_imagenet_eval\nfrom torchvision import transforms\nfrom PIL import Image\n\n\ndef preprocess(args, src_path, save_path):\n if isinstance(args.input_size, tuple):\n img_size = args.input_size[-2:]\n else:\n img_size = args.input_size\n\n preprocesser = transforms_imagenet_eval(\n img_size,\n interpolation=args.interpolation,\n use_prefetcher=args.use_prefetcher,\n mean=args.mean,\n std=args.std,\n crop_pct=args.crop_pct)\n\n i = 0\n in_files = os.listdir(src_path)\n for file in in_files:\n i = i + 1\n print(file, \"===\", i)\n input_image = Image.open(src_path + file).convert('RGB')\n input_tensor = preprocesser(input_image)\n img = np.array(input_tensor).astype(np.float32)\n img = (img - np.array([x * 255 for x in args.mean]).reshape(3, 1, 1)) / np.array(\n [x * 255 for x in args.std]).reshape(3, 1, 1)\n img = img.astype(np.float32)\n img.tofile(os.path.join(save_path, file.split('.')[0] + \".bin\"))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str, metavar='NAME',\n help='Image resize interpolation type (overrides model)')\n 
parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float,\n metavar='N', help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = (0.485, 0.456, 0.406)\n args.std = (0.229, 0.224, 0.225)\n args.input_size = (3, 224, 224)\n\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n\n preprocess(args, args.src_path, args.save_path)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from fgpio import GPIO
import boards
|
flexible
|
{
"blob_id": "f66f79cd4132b23c082149a3a1d887f661fd7ee5",
"index": 7247,
"step-1": "<mask token>\n",
"step-2": "from fgpio import GPIO\nimport boards\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CustomLogger.init_log()
<|reserved_special_token_0|>
CustomLogger.info('[main]', log_str)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CustomLogger.init_log()
log_str = '%s/%s/%s\n' % ('demo1', 'demo2', 'demo3')
CustomLogger.info('[main]', log_str)
<|reserved_special_token_1|>
from python_logging.Demo_CustomLogger import CustomLogger
CustomLogger.init_log()
log_str = '%s/%s/%s\n' % ('demo1', 'demo2', 'demo3')
CustomLogger.info('[main]', log_str)
<|reserved_special_token_1|>
from python_logging.Demo_CustomLogger import CustomLogger
CustomLogger.init_log()
# CustomLogger.info()
log_str = '%s/%s/%s\n' % ("demo1", "demo2", "demo3")
CustomLogger.info('[main]', log_str)
|
flexible
|
{
"blob_id": "ed5653455062cb3468c232cf0fa3f1d18793626a",
"index": 591,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nCustomLogger.init_log()\n<mask token>\nCustomLogger.info('[main]', log_str)\n",
"step-3": "<mask token>\nCustomLogger.init_log()\nlog_str = '%s/%s/%s\\n' % ('demo1', 'demo2', 'demo3')\nCustomLogger.info('[main]', log_str)\n",
"step-4": "from python_logging.Demo_CustomLogger import CustomLogger\nCustomLogger.init_log()\nlog_str = '%s/%s/%s\\n' % ('demo1', 'demo2', 'demo3')\nCustomLogger.info('[main]', log_str)\n",
"step-5": "from python_logging.Demo_CustomLogger import CustomLogger\n\nCustomLogger.init_log()\n# CustomLogger.info()\nlog_str = '%s/%s/%s\\n' % (\"demo1\", \"demo2\", \"demo3\")\nCustomLogger.info('[main]', log_str)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json
import requests
import boto3
import uuid
import time
profile_name = 'mine'
region = 'us-west-2'
session = boto3.Session(profile_name=profile_name)
api = session.client('apigateway', region_name=region)
cf = session.client('cloudformation', region_name=region)
def get_key(name_of_key):
print('Discovering API Key')
response = api.get_api_keys(includeValues=True)
items = response['items']
for item in items:
if name_of_key in item['name']:
return item['value']
def get_url(name_of_stack):
print('Discovering Cloudformation Exports')
exports = cf.list_exports()['Exports']
for export in exports:
if export['Name'] == 'url-{}'.format(name_of_stack):
return export['Value']
def post(url, key, data):
data_json = json.dumps(data)
headers = {'Content-type': 'application/json', 'x-api-key': key}
return requests.post(url, data=data_json, headers=headers)
if __name__ == "__main__":
name = 'advanced'
full_url = get_url(name)
api_key = get_key(name)
while True:
body = {
"input": [
str(uuid.uuid4()),
str(uuid.uuid4())
]
}
print(post(full_url, api_key, body))
time.sleep(1)
|
normal
|
{
"blob_id": "10fda09f47c292cb3dc901f42d38ead7757460f5",
"index": 3699,
"step-1": "<mask token>\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\nif __name__ == '__main__':\n name = 'advanced'\n full_url = get_url(name)\n api_key = get_key(name)\n while True:\n body = {'input': [str(uuid.uuid4()), str(uuid.uuid4())]}\n print(post(full_url, api_key, body))\n time.sleep(1)\n",
"step-3": "<mask token>\nprofile_name = 'mine'\nregion = 'us-west-2'\nsession = boto3.Session(profile_name=profile_name)\napi = session.client('apigateway', region_name=region)\ncf = session.client('cloudformation', region_name=region)\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\nif __name__ == '__main__':\n name = 'advanced'\n full_url = get_url(name)\n api_key = get_key(name)\n while True:\n body = {'input': [str(uuid.uuid4()), str(uuid.uuid4())]}\n print(post(full_url, api_key, body))\n time.sleep(1)\n",
"step-4": "import json\nimport requests\nimport boto3\nimport uuid\nimport time\nprofile_name = 'mine'\nregion = 'us-west-2'\nsession = boto3.Session(profile_name=profile_name)\napi = session.client('apigateway', region_name=region)\ncf = session.client('cloudformation', region_name=region)\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\nif __name__ == '__main__':\n name = 'advanced'\n full_url = get_url(name)\n api_key = get_key(name)\n while True:\n body = {'input': [str(uuid.uuid4()), str(uuid.uuid4())]}\n print(post(full_url, api_key, body))\n time.sleep(1)\n",
"step-5": "import json\nimport requests\nimport boto3\nimport uuid\nimport time\n\nprofile_name = 'mine'\nregion = 'us-west-2'\nsession = boto3.Session(profile_name=profile_name)\napi = session.client('apigateway', region_name=region)\ncf = session.client('cloudformation', region_name=region)\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\nif __name__ == \"__main__\":\n name = 'advanced'\n full_url = get_url(name)\n api_key = get_key(name)\n while True:\n body = {\n \"input\": [\n str(uuid.uuid4()),\n str(uuid.uuid4())\n ]\n }\n print(post(full_url, api_key, body))\n time.sleep(1)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from distutils.core import setup
setup(
name="zuknuft",
version="0.1",
author="riotbib",
author_email="riotbib@github",
scripts=["zukunft.py"],
install_requires=[
'bottle',
],
)
|
normal
|
{
"blob_id": "638842cda666100ce197437cb354f66de77eb328",
"index": 8065,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='zuknuft', version='0.1', author='riotbib', author_email=\n 'riotbib@github', scripts=['zukunft.py'], install_requires=['bottle'])\n",
"step-3": "from distutils.core import setup\nsetup(name='zuknuft', version='0.1', author='riotbib', author_email=\n 'riotbib@github', scripts=['zukunft.py'], install_requires=['bottle'])\n",
"step-4": "from distutils.core import setup\n\nsetup(\n name=\"zuknuft\",\n version=\"0.1\",\n author=\"riotbib\",\n author_email=\"riotbib@github\",\n scripts=[\"zukunft.py\"],\n install_requires=[\n 'bottle',\n ],\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def sequential_search(my_list, search_elt):
found = False
start_time = time.time()
for elt in my_list:
if search_elt == elt:
found = True
break
return time.time() - start_time, found
def ordered_sequential_search(my_list, search_elt):
found = False
start_time = time.time()
for elt in my_list:
if search_elt == elt:
found = True
break
elif search_elt > elt:
break
return time.time() - start_time, found
<|reserved_special_token_0|>
def binary_search_rec(a_list, item):
if len(a_list) == 0:
return False
else:
midpoint = len(a_list) // 2
if a_list[midpoint] == item:
return True
elif item < a_list[midpoint]:
return binary_search_rec(a_list[:midpoint], item)
else:
return binary_search_rec(a_list[midpoint + 1:], item)
def binary_search_recursive(my_list, search_elt, start_time=time.time):
start_time = time.time()
return time.time() - start_time, binary_search_rec(my_list, search_elt)
<|reserved_special_token_0|>
def functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):
fn_name, fn_function, fn_list_indx = fn
timing, _ = fn_function(rnd_list[fn_list_indx], -1)
if amt_of_nb not in timeAggregator:
timeAggregator[amt_of_nb] = {}
if fn_name not in timeAggregator[amt_of_nb]:
timeAggregator[amt_of_nb][fn_name] = 0
timeAggregator[amt_of_nb][fn_name] += timing
def printTimerAggregator(timeAggregator, list_size):
for amount_of_number, fn_type in timeAggregator.iteritems():
print('For %s size of list:' % amount_of_number)
for fn_name, consumedTime in fn_type.iteritems():
print('\t%s took %10.7f seconds to run, on average' % (fn_name,
consumedTime / list_size))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sequential_search(my_list, search_elt):
found = False
start_time = time.time()
for elt in my_list:
if search_elt == elt:
found = True
break
return time.time() - start_time, found
def ordered_sequential_search(my_list, search_elt):
found = False
start_time = time.time()
for elt in my_list:
if search_elt == elt:
found = True
break
elif search_elt > elt:
break
return time.time() - start_time, found
def binary_search_iterative(my_list, search_elt):
first = 0
last = len(my_list) - 1
found = False
start_time = time.time()
while first <= last and not found:
midpoint = (first + last) // 2
if my_list[midpoint] == search_elt:
found = True
elif search_elt < my_list[midpoint]:
last = midpoint - 1
else:
first = midpoint + 1
return time.time() - start_time, found
def binary_search_rec(a_list, item):
if len(a_list) == 0:
return False
else:
midpoint = len(a_list) // 2
if a_list[midpoint] == item:
return True
elif item < a_list[midpoint]:
return binary_search_rec(a_list[:midpoint], item)
else:
return binary_search_rec(a_list[midpoint + 1:], item)
def binary_search_recursive(my_list, search_elt, start_time=time.time):
start_time = time.time()
return time.time() - start_time, binary_search_rec(my_list, search_elt)
<|reserved_special_token_0|>
def functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):
fn_name, fn_function, fn_list_indx = fn
timing, _ = fn_function(rnd_list[fn_list_indx], -1)
if amt_of_nb not in timeAggregator:
timeAggregator[amt_of_nb] = {}
if fn_name not in timeAggregator[amt_of_nb]:
timeAggregator[amt_of_nb][fn_name] = 0
timeAggregator[amt_of_nb][fn_name] += timing
def printTimerAggregator(timeAggregator, list_size):
for amount_of_number, fn_type in timeAggregator.iteritems():
print('For %s size of list:' % amount_of_number)
for fn_name, consumedTime in fn_type.iteritems():
print('\t%s took %10.7f seconds to run, on average' % (fn_name,
consumedTime / list_size))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sequential_search(my_list, search_elt):
found = False
start_time = time.time()
for elt in my_list:
if search_elt == elt:
found = True
break
return time.time() - start_time, found
def ordered_sequential_search(my_list, search_elt):
found = False
start_time = time.time()
for elt in my_list:
if search_elt == elt:
found = True
break
elif search_elt > elt:
break
return time.time() - start_time, found
def binary_search_iterative(my_list, search_elt):
first = 0
last = len(my_list) - 1
found = False
start_time = time.time()
while first <= last and not found:
midpoint = (first + last) // 2
if my_list[midpoint] == search_elt:
found = True
elif search_elt < my_list[midpoint]:
last = midpoint - 1
else:
first = midpoint + 1
return time.time() - start_time, found
def binary_search_rec(a_list, item):
if len(a_list) == 0:
return False
else:
midpoint = len(a_list) // 2
if a_list[midpoint] == item:
return True
elif item < a_list[midpoint]:
return binary_search_rec(a_list[:midpoint], item)
else:
return binary_search_rec(a_list[midpoint + 1:], item)
def binary_search_recursive(my_list, search_elt, start_time=time.time):
start_time = time.time()
return time.time() - start_time, binary_search_rec(my_list, search_elt)
def generate_random_nb_my_list(nb, amount_my_list, maxNumber=sys.maxint):
return [[random.randint(0, maxNumber) for _ in range(nb)] for _ in
range(amount_my_list)]
def functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):
fn_name, fn_function, fn_list_indx = fn
timing, _ = fn_function(rnd_list[fn_list_indx], -1)
if amt_of_nb not in timeAggregator:
timeAggregator[amt_of_nb] = {}
if fn_name not in timeAggregator[amt_of_nb]:
timeAggregator[amt_of_nb][fn_name] = 0
timeAggregator[amt_of_nb][fn_name] += timing
def printTimerAggregator(timeAggregator, list_size):
for amount_of_number, fn_type in timeAggregator.iteritems():
print('For %s size of list:' % amount_of_number)
for fn_name, consumedTime in fn_type.iteritems():
print('\t%s took %10.7f seconds to run, on average' % (fn_name,
consumedTime / list_size))
if __name__ == '__main__':
timeAggregator = {}
amount_of_numbers = [500, 1000, 10000]
function_list = [('Sequential Search', sequential_search, 0), (
'Ordered Sequential Search', ordered_sequential_search, 1), (
'Binary Search Iterative', binary_search_iterative, 1), (
'Binary Search Recursive', binary_search_recursive, 1)]
list_size = 100
for amount_of_number in amount_of_numbers:
my_randoms = generate_random_nb_my_list(amount_of_number, list_size)
for unsorted_list in my_randoms:
sorted_list = unsorted_list[:]
sorted_list.sort()
for fn in function_list:
functionTimerAggregator(timeAggregator, fn,
amount_of_number, (unsorted_list, sorted_list))
printTimerAggregator(timeAggregator, list_size)
<|reserved_special_token_1|>
import time
import random
import sys
def sequential_search(my_list, search_elt):
found = False
start_time = time.time()
for elt in my_list:
if search_elt == elt:
found = True
break
return time.time() - start_time, found
def ordered_sequential_search(my_list, search_elt):
found = False
start_time = time.time()
for elt in my_list:
if search_elt == elt:
found = True
break
elif search_elt > elt:
break
return time.time() - start_time, found
def binary_search_iterative(my_list, search_elt):
first = 0
last = len(my_list) - 1
found = False
start_time = time.time()
while first <= last and not found:
midpoint = (first + last) // 2
if my_list[midpoint] == search_elt:
found = True
elif search_elt < my_list[midpoint]:
last = midpoint - 1
else:
first = midpoint + 1
return time.time() - start_time, found
def binary_search_rec(a_list, item):
if len(a_list) == 0:
return False
else:
midpoint = len(a_list) // 2
if a_list[midpoint] == item:
return True
elif item < a_list[midpoint]:
return binary_search_rec(a_list[:midpoint], item)
else:
return binary_search_rec(a_list[midpoint + 1:], item)
def binary_search_recursive(my_list, search_elt, start_time=time.time):
start_time = time.time()
return time.time() - start_time, binary_search_rec(my_list, search_elt)
def generate_random_nb_my_list(nb, amount_my_list, maxNumber=sys.maxint):
return [[random.randint(0, maxNumber) for _ in range(nb)] for _ in
range(amount_my_list)]
def functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):
fn_name, fn_function, fn_list_indx = fn
timing, _ = fn_function(rnd_list[fn_list_indx], -1)
if amt_of_nb not in timeAggregator:
timeAggregator[amt_of_nb] = {}
if fn_name not in timeAggregator[amt_of_nb]:
timeAggregator[amt_of_nb][fn_name] = 0
timeAggregator[amt_of_nb][fn_name] += timing
def printTimerAggregator(timeAggregator, list_size):
for amount_of_number, fn_type in timeAggregator.iteritems():
print('For %s size of list:' % amount_of_number)
for fn_name, consumedTime in fn_type.iteritems():
print('\t%s took %10.7f seconds to run, on average' % (fn_name,
consumedTime / list_size))
if __name__ == '__main__':
timeAggregator = {}
amount_of_numbers = [500, 1000, 10000]
function_list = [('Sequential Search', sequential_search, 0), (
'Ordered Sequential Search', ordered_sequential_search, 1), (
'Binary Search Iterative', binary_search_iterative, 1), (
'Binary Search Recursive', binary_search_recursive, 1)]
list_size = 100
for amount_of_number in amount_of_numbers:
my_randoms = generate_random_nb_my_list(amount_of_number, list_size)
for unsorted_list in my_randoms:
sorted_list = unsorted_list[:]
sorted_list.sort()
for fn in function_list:
functionTimerAggregator(timeAggregator, fn,
amount_of_number, (unsorted_list, sorted_list))
printTimerAggregator(timeAggregator, list_size)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import random
import sys
def sequential_search(my_list, search_elt):
found = False
start_time = time.time()
for elt in my_list:
if search_elt == elt:
found = True
break
return (time.time() - start_time), found
def ordered_sequential_search(my_list, search_elt):
found = False
start_time = time.time()
for elt in my_list:
if search_elt == elt:
found = True
break
elif search_elt > elt:
break
return (time.time() - start_time), found
def binary_search_iterative(my_list, search_elt):
first = 0
last = len(my_list) - 1
found = False
start_time = time.time()
while first <= last and not found:
midpoint = (first + last) // 2
if my_list[midpoint] == search_elt:
found = True
elif search_elt < my_list[midpoint]:
last = midpoint - 1
else:
first = midpoint + 1
return (time.time() - start_time), found
def binary_search_rec(a_list, item):
if len(a_list) == 0:
return False
else:
midpoint = len(a_list) // 2
if a_list[midpoint] == item:
return True
elif item < a_list[midpoint]:
return binary_search_rec(a_list[:midpoint], item)
else:
return binary_search_rec(a_list[midpoint + 1:], item)
def binary_search_recursive(my_list, search_elt, start_time = time.time):
start_time = time.time()
return (time.time() - start_time), binary_search_rec(my_list, search_elt)
def generate_random_nb_my_list(nb, amount_my_list, maxNumber = sys.maxint):
return [
[random.randint(0, maxNumber) for _ in range (nb)]
for _ in range (amount_my_list)
]
def functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):
(fn_name, fn_function, fn_list_indx) = fn
(timing, _) = fn_function(rnd_list[fn_list_indx], -1)
if amt_of_nb not in timeAggregator:
timeAggregator[amt_of_nb] = {}
if fn_name not in timeAggregator[amt_of_nb]:
timeAggregator[amt_of_nb][fn_name] = 0
timeAggregator[amt_of_nb][fn_name] += timing
def printTimerAggregator(timeAggregator, list_size):
for amount_of_number, fn_type in timeAggregator.iteritems():
print('For %s size of list:' % amount_of_number)
for fn_name, consumedTime in fn_type.iteritems():
print('\t%s took %10.7f seconds to run, on average'
% (fn_name, consumedTime / list_size))
if __name__ == '__main__':
timeAggregator = {}
amount_of_numbers = [500, 1000, 10000]
function_list = [
('Sequential Search', sequential_search, 0),
('Ordered Sequential Search', ordered_sequential_search, 1),
('Binary Search Iterative', binary_search_iterative, 1),
('Binary Search Recursive', binary_search_recursive, 1),
]
list_size = 100
for amount_of_number in amount_of_numbers:
my_randoms = generate_random_nb_my_list(amount_of_number, list_size)
for unsorted_list in my_randoms:
sorted_list = unsorted_list[:]
sorted_list.sort()
for fn in function_list:
functionTimerAggregator(
timeAggregator, fn, amount_of_number,
(unsorted_list, sorted_list))
printTimerAggregator(timeAggregator, list_size)
|
flexible
|
{
"blob_id": "f3a34d1c37165490c77ccd21f428718c8c90f866",
"index": 4057,
"step-1": "<mask token>\n\n\ndef sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n return time.time() - start_time, found\n\n\ndef ordered_sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n elif search_elt > elt:\n break\n return time.time() - start_time, found\n\n\n<mask token>\n\n\ndef binary_search_rec(a_list, item):\n if len(a_list) == 0:\n return False\n else:\n midpoint = len(a_list) // 2\n if a_list[midpoint] == item:\n return True\n elif item < a_list[midpoint]:\n return binary_search_rec(a_list[:midpoint], item)\n else:\n return binary_search_rec(a_list[midpoint + 1:], item)\n\n\ndef binary_search_recursive(my_list, search_elt, start_time=time.time):\n start_time = time.time()\n return time.time() - start_time, binary_search_rec(my_list, search_elt)\n\n\n<mask token>\n\n\ndef functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):\n fn_name, fn_function, fn_list_indx = fn\n timing, _ = fn_function(rnd_list[fn_list_indx], -1)\n if amt_of_nb not in timeAggregator:\n timeAggregator[amt_of_nb] = {}\n if fn_name not in timeAggregator[amt_of_nb]:\n timeAggregator[amt_of_nb][fn_name] = 0\n timeAggregator[amt_of_nb][fn_name] += timing\n\n\ndef printTimerAggregator(timeAggregator, list_size):\n for amount_of_number, fn_type in timeAggregator.iteritems():\n print('For %s size of list:' % amount_of_number)\n for fn_name, consumedTime in fn_type.iteritems():\n print('\\t%s took %10.7f seconds to run, on average' % (fn_name,\n consumedTime / list_size))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n return time.time() - start_time, found\n\n\ndef ordered_sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n elif search_elt > elt:\n break\n return time.time() - start_time, found\n\n\ndef binary_search_iterative(my_list, search_elt):\n first = 0\n last = len(my_list) - 1\n found = False\n start_time = time.time()\n while first <= last and not found:\n midpoint = (first + last) // 2\n if my_list[midpoint] == search_elt:\n found = True\n elif search_elt < my_list[midpoint]:\n last = midpoint - 1\n else:\n first = midpoint + 1\n return time.time() - start_time, found\n\n\ndef binary_search_rec(a_list, item):\n if len(a_list) == 0:\n return False\n else:\n midpoint = len(a_list) // 2\n if a_list[midpoint] == item:\n return True\n elif item < a_list[midpoint]:\n return binary_search_rec(a_list[:midpoint], item)\n else:\n return binary_search_rec(a_list[midpoint + 1:], item)\n\n\ndef binary_search_recursive(my_list, search_elt, start_time=time.time):\n start_time = time.time()\n return time.time() - start_time, binary_search_rec(my_list, search_elt)\n\n\n<mask token>\n\n\ndef functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):\n fn_name, fn_function, fn_list_indx = fn\n timing, _ = fn_function(rnd_list[fn_list_indx], -1)\n if amt_of_nb not in timeAggregator:\n timeAggregator[amt_of_nb] = {}\n if fn_name not in timeAggregator[amt_of_nb]:\n timeAggregator[amt_of_nb][fn_name] = 0\n timeAggregator[amt_of_nb][fn_name] += timing\n\n\ndef printTimerAggregator(timeAggregator, list_size):\n for amount_of_number, fn_type in timeAggregator.iteritems():\n print('For %s size of list:' % amount_of_number)\n for fn_name, consumedTime in fn_type.iteritems():\n print('\\t%s took %10.7f 
seconds to run, on average' % (fn_name,\n consumedTime / list_size))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n return time.time() - start_time, found\n\n\ndef ordered_sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n elif search_elt > elt:\n break\n return time.time() - start_time, found\n\n\ndef binary_search_iterative(my_list, search_elt):\n first = 0\n last = len(my_list) - 1\n found = False\n start_time = time.time()\n while first <= last and not found:\n midpoint = (first + last) // 2\n if my_list[midpoint] == search_elt:\n found = True\n elif search_elt < my_list[midpoint]:\n last = midpoint - 1\n else:\n first = midpoint + 1\n return time.time() - start_time, found\n\n\ndef binary_search_rec(a_list, item):\n if len(a_list) == 0:\n return False\n else:\n midpoint = len(a_list) // 2\n if a_list[midpoint] == item:\n return True\n elif item < a_list[midpoint]:\n return binary_search_rec(a_list[:midpoint], item)\n else:\n return binary_search_rec(a_list[midpoint + 1:], item)\n\n\ndef binary_search_recursive(my_list, search_elt, start_time=time.time):\n start_time = time.time()\n return time.time() - start_time, binary_search_rec(my_list, search_elt)\n\n\ndef generate_random_nb_my_list(nb, amount_my_list, maxNumber=sys.maxint):\n return [[random.randint(0, maxNumber) for _ in range(nb)] for _ in\n range(amount_my_list)]\n\n\ndef functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):\n fn_name, fn_function, fn_list_indx = fn\n timing, _ = fn_function(rnd_list[fn_list_indx], -1)\n if amt_of_nb not in timeAggregator:\n timeAggregator[amt_of_nb] = {}\n if fn_name not in timeAggregator[amt_of_nb]:\n timeAggregator[amt_of_nb][fn_name] = 0\n timeAggregator[amt_of_nb][fn_name] += timing\n\n\ndef printTimerAggregator(timeAggregator, list_size):\n for amount_of_number, fn_type in 
timeAggregator.iteritems():\n print('For %s size of list:' % amount_of_number)\n for fn_name, consumedTime in fn_type.iteritems():\n print('\\t%s took %10.7f seconds to run, on average' % (fn_name,\n consumedTime / list_size))\n\n\nif __name__ == '__main__':\n timeAggregator = {}\n amount_of_numbers = [500, 1000, 10000]\n function_list = [('Sequential Search', sequential_search, 0), (\n 'Ordered Sequential Search', ordered_sequential_search, 1), (\n 'Binary Search Iterative', binary_search_iterative, 1), (\n 'Binary Search Recursive', binary_search_recursive, 1)]\n list_size = 100\n for amount_of_number in amount_of_numbers:\n my_randoms = generate_random_nb_my_list(amount_of_number, list_size)\n for unsorted_list in my_randoms:\n sorted_list = unsorted_list[:]\n sorted_list.sort()\n for fn in function_list:\n functionTimerAggregator(timeAggregator, fn,\n amount_of_number, (unsorted_list, sorted_list))\n printTimerAggregator(timeAggregator, list_size)\n",
"step-4": "import time\nimport random\nimport sys\n\n\ndef sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n return time.time() - start_time, found\n\n\ndef ordered_sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n elif search_elt > elt:\n break\n return time.time() - start_time, found\n\n\ndef binary_search_iterative(my_list, search_elt):\n first = 0\n last = len(my_list) - 1\n found = False\n start_time = time.time()\n while first <= last and not found:\n midpoint = (first + last) // 2\n if my_list[midpoint] == search_elt:\n found = True\n elif search_elt < my_list[midpoint]:\n last = midpoint - 1\n else:\n first = midpoint + 1\n return time.time() - start_time, found\n\n\ndef binary_search_rec(a_list, item):\n if len(a_list) == 0:\n return False\n else:\n midpoint = len(a_list) // 2\n if a_list[midpoint] == item:\n return True\n elif item < a_list[midpoint]:\n return binary_search_rec(a_list[:midpoint], item)\n else:\n return binary_search_rec(a_list[midpoint + 1:], item)\n\n\ndef binary_search_recursive(my_list, search_elt, start_time=time.time):\n start_time = time.time()\n return time.time() - start_time, binary_search_rec(my_list, search_elt)\n\n\ndef generate_random_nb_my_list(nb, amount_my_list, maxNumber=sys.maxint):\n return [[random.randint(0, maxNumber) for _ in range(nb)] for _ in\n range(amount_my_list)]\n\n\ndef functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):\n fn_name, fn_function, fn_list_indx = fn\n timing, _ = fn_function(rnd_list[fn_list_indx], -1)\n if amt_of_nb not in timeAggregator:\n timeAggregator[amt_of_nb] = {}\n if fn_name not in timeAggregator[amt_of_nb]:\n timeAggregator[amt_of_nb][fn_name] = 0\n timeAggregator[amt_of_nb][fn_name] += timing\n\n\ndef printTimerAggregator(timeAggregator, list_size):\n for 
amount_of_number, fn_type in timeAggregator.iteritems():\n print('For %s size of list:' % amount_of_number)\n for fn_name, consumedTime in fn_type.iteritems():\n print('\\t%s took %10.7f seconds to run, on average' % (fn_name,\n consumedTime / list_size))\n\n\nif __name__ == '__main__':\n timeAggregator = {}\n amount_of_numbers = [500, 1000, 10000]\n function_list = [('Sequential Search', sequential_search, 0), (\n 'Ordered Sequential Search', ordered_sequential_search, 1), (\n 'Binary Search Iterative', binary_search_iterative, 1), (\n 'Binary Search Recursive', binary_search_recursive, 1)]\n list_size = 100\n for amount_of_number in amount_of_numbers:\n my_randoms = generate_random_nb_my_list(amount_of_number, list_size)\n for unsorted_list in my_randoms:\n sorted_list = unsorted_list[:]\n sorted_list.sort()\n for fn in function_list:\n functionTimerAggregator(timeAggregator, fn,\n amount_of_number, (unsorted_list, sorted_list))\n printTimerAggregator(timeAggregator, list_size)\n",
"step-5": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport time\r\nimport random\r\nimport sys\r\n\r\ndef sequential_search(my_list, search_elt):\r\n\tfound = False\r\n\tstart_time = time.time()\r\n\tfor elt in my_list:\r\n\t\tif search_elt == elt:\r\n\t\t\tfound = True\r\n\t\t\tbreak\r\n\treturn (time.time() - start_time), found\r\n\r\ndef ordered_sequential_search(my_list, search_elt):\r\n\tfound = False\r\n\tstart_time = time.time()\r\n\tfor elt in my_list:\r\n\t\tif search_elt == elt:\r\n\t\t\tfound = True\r\n\t\t\tbreak\r\n\t\telif search_elt > elt:\r\n\t\t\tbreak\r\n\treturn (time.time() - start_time), found\r\n\r\ndef binary_search_iterative(my_list, search_elt):\r\n\tfirst = 0\r\n\tlast = len(my_list) - 1\r\n\tfound = False\r\n\r\n\tstart_time = time.time()\r\n\twhile first <= last and not found:\r\n\t\tmidpoint = (first + last) // 2\r\n\t\tif my_list[midpoint] == search_elt:\r\n\t\t\tfound = True\r\n\t\telif search_elt < my_list[midpoint]:\r\n\t\t\tlast = midpoint - 1\r\n\t\telse:\r\n\t\t\tfirst = midpoint + 1\r\n\r\n\treturn (time.time() - start_time), found\r\n\r\ndef binary_search_rec(a_list, item):\r\n\tif len(a_list) == 0:\r\n\t\treturn False\r\n\telse:\r\n\t\tmidpoint = len(a_list) // 2\r\n\t\tif a_list[midpoint] == item:\r\n\t\t\treturn True\r\n\t\telif item < a_list[midpoint]:\r\n\t\t\treturn binary_search_rec(a_list[:midpoint], item)\r\n\t\telse:\r\n\t\t\treturn binary_search_rec(a_list[midpoint + 1:], item)\r\n\r\ndef binary_search_recursive(my_list, search_elt, start_time = time.time):\r\n\tstart_time = time.time()\r\n\treturn (time.time() - start_time), binary_search_rec(my_list, search_elt)\r\n\r\ndef generate_random_nb_my_list(nb, amount_my_list, maxNumber = sys.maxint):\r\n\treturn [\r\n\t\t[random.randint(0, maxNumber) for _ in range (nb)]\r\n\t\t\tfor _ in range (amount_my_list)\r\n\t]\r\n\r\ndef functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):\r\n\t(fn_name, fn_function, fn_list_indx) = fn\r\n\t(timing, _) = 
fn_function(rnd_list[fn_list_indx], -1)\r\n\r\n\tif amt_of_nb not in timeAggregator:\r\n\t\ttimeAggregator[amt_of_nb] = {}\r\n\tif fn_name not in timeAggregator[amt_of_nb]:\r\n\t\ttimeAggregator[amt_of_nb][fn_name] = 0\t\r\n\ttimeAggregator[amt_of_nb][fn_name] += timing\r\n\r\ndef printTimerAggregator(timeAggregator, list_size):\r\n\tfor amount_of_number, fn_type in timeAggregator.iteritems():\r\n\t\tprint('For %s size of list:' % amount_of_number)\r\n\t\tfor fn_name, consumedTime in fn_type.iteritems():\r\n\t\t\tprint('\\t%s took %10.7f seconds to run, on average'\r\n\t\t\t\t% (fn_name, consumedTime / list_size))\r\n\r\nif __name__ == '__main__':\r\n\ttimeAggregator = {}\r\n\tamount_of_numbers = [500, 1000, 10000]\r\n\tfunction_list = [\r\n\t\t('Sequential Search', sequential_search, 0),\r\n\t\t('Ordered Sequential Search', ordered_sequential_search, 1),\r\n\t\t('Binary Search Iterative', binary_search_iterative, 1),\r\n\t\t('Binary Search Recursive', binary_search_recursive, 1),\r\n\t]\r\n\tlist_size = 100\r\n\r\n\tfor amount_of_number in amount_of_numbers:\r\n\t\tmy_randoms = generate_random_nb_my_list(amount_of_number, list_size)\r\n\t\tfor unsorted_list in my_randoms:\r\n\t\t\tsorted_list = unsorted_list[:]\r\n\t\t\tsorted_list.sort()\r\n\r\n\t\t\tfor fn in function_list:\r\n\t\t\t\tfunctionTimerAggregator(\r\n\t\t\t\t\ttimeAggregator, fn, amount_of_number,\r\n\t\t\t\t\t(unsorted_list, sorted_list))\r\n\r\n\tprintTimerAggregator(timeAggregator, list_size)\r\n\t",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
<|reserved_special_token_0|>
class Helper(object):
<|reserved_special_token_0|>
def base_dir(self, filePath, folder='data'):
"""
返回公共路径
:parameter folder:文件夹
:parameter filePath:文件名称
"""
return os.path.join(os.path.dirname(os.path.dirname(__file__)),
folder, filePath)
<|reserved_special_token_0|>
def getUrl(self, rowx):
"""
获取请求地址
:parameter rowx:在excel中的行数
"""
return self.readExcel(rowx)[1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Helper(object):
<|reserved_special_token_0|>
def base_dir(self, filePath, folder='data'):
"""
返回公共路径
:parameter folder:文件夹
:parameter filePath:文件名称
"""
return os.path.join(os.path.dirname(os.path.dirname(__file__)),
folder, filePath)
<|reserved_special_token_0|>
def getUrl(self, rowx):
"""
获取请求地址
:parameter rowx:在excel中的行数
"""
return self.readExcel(rowx)[1]
def getData(self, rowx):
"""
获取数据并且返回
:parameter rowx:在excel中的行数
"""
return json.loads(self.readExcel(rowx)[2])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Helper(object):
"""公共方法"""
def base_dir(self, filePath, folder='data'):
"""
返回公共路径
:parameter folder:文件夹
:parameter filePath:文件名称
"""
return os.path.join(os.path.dirname(os.path.dirname(__file__)),
folder, filePath)
def readExcel(self, rowx, filePath='data.xlsx'):
"""
读取excel中数据并且返回
:parameter filePath:xlsx文件名称
:parameter rowx:在excel中的行数
"""
book = xlrd.open_workbook(self.base_dir(filePath))
sheet = book.sheet_by_index(0)
return sheet.row_values(rowx)
def getUrl(self, rowx):
"""
获取请求地址
:parameter rowx:在excel中的行数
"""
return self.readExcel(rowx)[1]
def getData(self, rowx):
"""
获取数据并且返回
:parameter rowx:在excel中的行数
"""
return json.loads(self.readExcel(rowx)[2])
<|reserved_special_token_1|>
import os
import xlrd
import json
class Helper(object):
"""公共方法"""
def base_dir(self, filePath, folder='data'):
"""
返回公共路径
:parameter folder:文件夹
:parameter filePath:文件名称
"""
return os.path.join(os.path.dirname(os.path.dirname(__file__)),
folder, filePath)
def readExcel(self, rowx, filePath='data.xlsx'):
"""
读取excel中数据并且返回
:parameter filePath:xlsx文件名称
:parameter rowx:在excel中的行数
"""
book = xlrd.open_workbook(self.base_dir(filePath))
sheet = book.sheet_by_index(0)
return sheet.row_values(rowx)
def getUrl(self, rowx):
"""
获取请求地址
:parameter rowx:在excel中的行数
"""
return self.readExcel(rowx)[1]
def getData(self, rowx):
"""
获取数据并且返回
:parameter rowx:在excel中的行数
"""
return json.loads(self.readExcel(rowx)[2])
<|reserved_special_token_1|>
#!/usr/bin/env python
#-*-coding:utf-8-*-
#author:wuya
import os
import xlrd
import json
class Helper(object):
'''公共方法'''
def base_dir(self,filePath,folder='data'):
'''
返回公共路径
:parameter folder:文件夹
:parameter filePath:文件名称
'''
return os.path.join(
os.path.dirname(
os.path.dirname(__file__)),
folder,filePath)
def readExcel(self,rowx,filePath='data.xlsx'):
'''
读取excel中数据并且返回
:parameter filePath:xlsx文件名称
:parameter rowx:在excel中的行数
'''
book=xlrd.open_workbook(self.base_dir(filePath))
sheet=book.sheet_by_index(0)
return sheet.row_values(rowx)
def getUrl(self,rowx):
'''
获取请求地址
:parameter rowx:在excel中的行数
'''
return self.readExcel(rowx)[1]
def getData(self,rowx):
'''
获取数据并且返回
:parameter rowx:在excel中的行数
'''
return json.loads(self.readExcel(rowx)[2])
|
flexible
|
{
"blob_id": "7c2349810fc757848eeb5bddef4640d87d5f9ab9",
"index": 2439,
"step-1": "<mask token>\n\n\nclass Helper(object):\n <mask token>\n\n def base_dir(self, filePath, folder='data'):\n \"\"\"\n 返回公共路径\n :parameter folder:文件夹\n :parameter filePath:文件名称\n \"\"\"\n return os.path.join(os.path.dirname(os.path.dirname(__file__)),\n folder, filePath)\n <mask token>\n\n def getUrl(self, rowx):\n \"\"\"\n 获取请求地址\n :parameter rowx:在excel中的行数\n \"\"\"\n return self.readExcel(rowx)[1]\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Helper(object):\n <mask token>\n\n def base_dir(self, filePath, folder='data'):\n \"\"\"\n 返回公共路径\n :parameter folder:文件夹\n :parameter filePath:文件名称\n \"\"\"\n return os.path.join(os.path.dirname(os.path.dirname(__file__)),\n folder, filePath)\n <mask token>\n\n def getUrl(self, rowx):\n \"\"\"\n 获取请求地址\n :parameter rowx:在excel中的行数\n \"\"\"\n return self.readExcel(rowx)[1]\n\n def getData(self, rowx):\n \"\"\"\n 获取数据并且返回\n :parameter rowx:在excel中的行数\n \"\"\"\n return json.loads(self.readExcel(rowx)[2])\n",
"step-3": "<mask token>\n\n\nclass Helper(object):\n \"\"\"公共方法\"\"\"\n\n def base_dir(self, filePath, folder='data'):\n \"\"\"\n 返回公共路径\n :parameter folder:文件夹\n :parameter filePath:文件名称\n \"\"\"\n return os.path.join(os.path.dirname(os.path.dirname(__file__)),\n folder, filePath)\n\n def readExcel(self, rowx, filePath='data.xlsx'):\n \"\"\"\n 读取excel中数据并且返回\n :parameter filePath:xlsx文件名称\n :parameter rowx:在excel中的行数\n \"\"\"\n book = xlrd.open_workbook(self.base_dir(filePath))\n sheet = book.sheet_by_index(0)\n return sheet.row_values(rowx)\n\n def getUrl(self, rowx):\n \"\"\"\n 获取请求地址\n :parameter rowx:在excel中的行数\n \"\"\"\n return self.readExcel(rowx)[1]\n\n def getData(self, rowx):\n \"\"\"\n 获取数据并且返回\n :parameter rowx:在excel中的行数\n \"\"\"\n return json.loads(self.readExcel(rowx)[2])\n",
"step-4": "import os\nimport xlrd\nimport json\n\n\nclass Helper(object):\n \"\"\"公共方法\"\"\"\n\n def base_dir(self, filePath, folder='data'):\n \"\"\"\n 返回公共路径\n :parameter folder:文件夹\n :parameter filePath:文件名称\n \"\"\"\n return os.path.join(os.path.dirname(os.path.dirname(__file__)),\n folder, filePath)\n\n def readExcel(self, rowx, filePath='data.xlsx'):\n \"\"\"\n 读取excel中数据并且返回\n :parameter filePath:xlsx文件名称\n :parameter rowx:在excel中的行数\n \"\"\"\n book = xlrd.open_workbook(self.base_dir(filePath))\n sheet = book.sheet_by_index(0)\n return sheet.row_values(rowx)\n\n def getUrl(self, rowx):\n \"\"\"\n 获取请求地址\n :parameter rowx:在excel中的行数\n \"\"\"\n return self.readExcel(rowx)[1]\n\n def getData(self, rowx):\n \"\"\"\n 获取数据并且返回\n :parameter rowx:在excel中的行数\n \"\"\"\n return json.loads(self.readExcel(rowx)[2])\n",
"step-5": "#!/usr/bin/env python\n#-*-coding:utf-8-*-\n\n#author:wuya\n\n\nimport os\nimport xlrd\nimport json\n\n\nclass Helper(object):\n '''公共方法'''\n\n def base_dir(self,filePath,folder='data'):\n '''\n 返回公共路径\n :parameter folder:文件夹\n :parameter filePath:文件名称\n '''\n return os.path.join(\n os.path.dirname(\n os.path.dirname(__file__)),\n folder,filePath)\n\n def readExcel(self,rowx,filePath='data.xlsx'):\n '''\n 读取excel中数据并且返回\n :parameter filePath:xlsx文件名称\n :parameter rowx:在excel中的行数\n '''\n book=xlrd.open_workbook(self.base_dir(filePath))\n sheet=book.sheet_by_index(0)\n return sheet.row_values(rowx)\n\n def getUrl(self,rowx):\n '''\n 获取请求地址\n :parameter rowx:在excel中的行数\n '''\n return self.readExcel(rowx)[1]\n\n def getData(self,rowx):\n '''\n 获取数据并且返回\n :parameter rowx:在excel中的行数\n '''\n return json.loads(self.readExcel(rowx)[2])\n\n\n\n\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
from . import models
from . import wizards
from odoo import api, SUPERUSER_ID
from odoo.addons.account.models.chart_template import preserve_existing_tags_on_taxes
def _preserve_tag_on_taxes(cr, registry):
preserve_existing_tags_on_taxes(cr, registry, 'l10n_lb')
env = api.Environment(cr, SUPERUSER_ID, {})
accounts = env['account.account'].search([('code', 'in', ['5301','5121','999999'])])
accounts.unlink()
journal_id = env['account.journal'].search([('name', '=', 'Cash'),('type', '=', 'cash')],limit=1)
if journal_id:
account = env['account.account'].search([('code', '=', '53000001')],limit=1)
journal_id.write({
'default_debit_account_id': account.id,
'default_credit_account_id': account.id
})
|
normal
|
{
"blob_id": "74b38599dd793282612a468a760f6301b9f039d6",
"index": 9878,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef _preserve_tag_on_taxes(cr, registry):\n preserve_existing_tags_on_taxes(cr, registry, 'l10n_lb')\n env = api.Environment(cr, SUPERUSER_ID, {})\n accounts = env['account.account'].search([('code', 'in', ['5301',\n '5121', '999999'])])\n accounts.unlink()\n journal_id = env['account.journal'].search([('name', '=', 'Cash'), (\n 'type', '=', 'cash')], limit=1)\n if journal_id:\n account = env['account.account'].search([('code', '=', '53000001')],\n limit=1)\n journal_id.write({'default_debit_account_id': account.id,\n 'default_credit_account_id': account.id})\n",
"step-3": "from . import models\nfrom . import wizards\nfrom odoo import api, SUPERUSER_ID\nfrom odoo.addons.account.models.chart_template import preserve_existing_tags_on_taxes\n\n\ndef _preserve_tag_on_taxes(cr, registry):\n preserve_existing_tags_on_taxes(cr, registry, 'l10n_lb')\n env = api.Environment(cr, SUPERUSER_ID, {})\n accounts = env['account.account'].search([('code', 'in', ['5301',\n '5121', '999999'])])\n accounts.unlink()\n journal_id = env['account.journal'].search([('name', '=', 'Cash'), (\n 'type', '=', 'cash')], limit=1)\n if journal_id:\n account = env['account.account'].search([('code', '=', '53000001')],\n limit=1)\n journal_id.write({'default_debit_account_id': account.id,\n 'default_credit_account_id': account.id})\n",
"step-4": "# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\n# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr\n\nfrom . import models\nfrom . import wizards\nfrom odoo import api, SUPERUSER_ID\nfrom odoo.addons.account.models.chart_template import preserve_existing_tags_on_taxes\n\ndef _preserve_tag_on_taxes(cr, registry):\n preserve_existing_tags_on_taxes(cr, registry, 'l10n_lb')\n env = api.Environment(cr, SUPERUSER_ID, {})\n accounts = env['account.account'].search([('code', 'in', ['5301','5121','999999'])])\n accounts.unlink()\n\n journal_id = env['account.journal'].search([('name', '=', 'Cash'),('type', '=', 'cash')],limit=1)\n if journal_id:\n account = env['account.account'].search([('code', '=', '53000001')],limit=1)\n journal_id.write({\n 'default_debit_account_id': account.id,\n 'default_credit_account_id': account.id\n })\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from rest_framework import serializers
from films.models import *
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
films = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.objects.all())
theaters = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.objects.all())
class Meta:
model = User
fields = ('id', 'username', 'films', 'theaters')
class GenreSerializer(serializers.ModelSerializer):
class Meta:
model = Genre
fields = ('id', 'name', 'film_set')
depth = 1
class GenreWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Genre
fields = ('id', 'name', 'film_set')
class FilmSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Film
fields = ('id', 'title', 'year_prod', 'genre', 'theater_set', 'owner')
depth = 1
class FilmWriteSerializer(serializers.ModelSerializer):
genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(), allow_null=True)
class Meta:
model = Film
fields = ('id', 'title', 'year_prod', 'genre', 'theater_set')
class TheaterSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Theater
fields = ('id', 'name', 'city', 'films', 'owner')
depth = 1
class TheaterWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Theater
fields = ('id', 'name', 'city')
|
normal
|
{
"blob_id": "e6aa28ae312ea5d7f0f818b7e86b0e76e2e57b48",
"index": 4652,
"step-1": "<mask token>\n\n\nclass FilmSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set', 'owner'\n depth = 1\n\n\nclass FilmWriteSerializer(serializers.ModelSerializer):\n genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(),\n allow_null=True)\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set'\n\n\nclass TheaterSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city', 'films', 'owner'\n depth = 1\n\n\nclass TheaterWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city'\n",
"step-2": "<mask token>\n\n\nclass GenreSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genre\n fields = 'id', 'name', 'film_set'\n depth = 1\n\n\nclass GenreWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genre\n fields = 'id', 'name', 'film_set'\n\n\nclass FilmSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set', 'owner'\n depth = 1\n\n\nclass FilmWriteSerializer(serializers.ModelSerializer):\n genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(),\n allow_null=True)\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set'\n\n\nclass TheaterSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city', 'films', 'owner'\n depth = 1\n\n\nclass TheaterWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city'\n",
"step-3": "<mask token>\n\n\nclass UserSerializer(serializers.ModelSerializer):\n <mask token>\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'id', 'username', 'films', 'theaters'\n\n\nclass GenreSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genre\n fields = 'id', 'name', 'film_set'\n depth = 1\n\n\nclass GenreWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genre\n fields = 'id', 'name', 'film_set'\n\n\nclass FilmSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set', 'owner'\n depth = 1\n\n\nclass FilmWriteSerializer(serializers.ModelSerializer):\n genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(),\n allow_null=True)\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set'\n\n\nclass TheaterSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city', 'films', 'owner'\n depth = 1\n\n\nclass TheaterWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city'\n",
"step-4": "<mask token>\n\n\nclass UserSerializer(serializers.ModelSerializer):\n films = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.\n objects.all())\n theaters = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.\n objects.all())\n\n\n class Meta:\n model = User\n fields = 'id', 'username', 'films', 'theaters'\n\n\nclass GenreSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genre\n fields = 'id', 'name', 'film_set'\n depth = 1\n\n\nclass GenreWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genre\n fields = 'id', 'name', 'film_set'\n\n\nclass FilmSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set', 'owner'\n depth = 1\n\n\nclass FilmWriteSerializer(serializers.ModelSerializer):\n genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(),\n allow_null=True)\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set'\n\n\nclass TheaterSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city', 'films', 'owner'\n depth = 1\n\n\nclass TheaterWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city'\n",
"step-5": "from rest_framework import serializers\nfrom films.models import *\nfrom django.contrib.auth.models import User\n\nclass UserSerializer(serializers.ModelSerializer):\n films = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.objects.all())\n theaters = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.objects.all())\n\n class Meta:\n model = User\n fields = ('id', 'username', 'films', 'theaters')\n\nclass GenreSerializer(serializers.ModelSerializer):\n class Meta:\n model = Genre\n fields = ('id', 'name', 'film_set')\n depth = 1\n\nclass GenreWriteSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Genre\n fields = ('id', 'name', 'film_set')\n\nclass FilmSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n class Meta:\n model = Film\n fields = ('id', 'title', 'year_prod', 'genre', 'theater_set', 'owner')\n depth = 1\n\nclass FilmWriteSerializer(serializers.ModelSerializer):\n genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(), allow_null=True)\n\n class Meta:\n model = Film\n fields = ('id', 'title', 'year_prod', 'genre', 'theater_set')\n\nclass TheaterSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n class Meta:\n model = Theater\n fields = ('id', 'name', 'city', 'films', 'owner')\n depth = 1\n\nclass TheaterWriteSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Theater\n fields = ('id', 'name', 'city')\n",
"step-ids": [
7,
9,
10,
11,
13
]
}
|
[
7,
9,
10,
11,
13
] |
<|reserved_special_token_0|>
class RandomIterator(TransitionIterator):
_n_steps_per_epoch: int
<|reserved_special_token_0|>
def _reset(self) ->None:
pass
<|reserved_special_token_0|>
def _has_finished(self) ->bool:
return self._count >= self._n_steps_per_epoch
def __len__(self) ->int:
return self._n_steps_per_epoch
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RandomIterator(TransitionIterator):
_n_steps_per_epoch: int
<|reserved_special_token_0|>
def _reset(self) ->None:
pass
def _next(self) ->Transition:
index = cast(int, np.random.randint(len(self._transitions)))
transition = self._transitions[index]
return transition
def _has_finished(self) ->bool:
return self._count >= self._n_steps_per_epoch
def __len__(self) ->int:
return self._n_steps_per_epoch
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RandomIterator(TransitionIterator):
_n_steps_per_epoch: int
def __init__(self, transitions: List[Transition], n_steps_per_epoch:
int, batch_size: int, n_steps: int=1, gamma: float=0.99, n_frames:
int=1, real_ratio: float=1.0, generated_maxlen: int=100000):
super().__init__(transitions=transitions, batch_size=batch_size,
n_steps=n_steps, gamma=gamma, n_frames=n_frames, real_ratio=
real_ratio, generated_maxlen=generated_maxlen)
self._n_steps_per_epoch = n_steps_per_epoch
def _reset(self) ->None:
pass
def _next(self) ->Transition:
index = cast(int, np.random.randint(len(self._transitions)))
transition = self._transitions[index]
return transition
def _has_finished(self) ->bool:
return self._count >= self._n_steps_per_epoch
def __len__(self) ->int:
return self._n_steps_per_epoch
<|reserved_special_token_1|>
from typing import List, cast
import numpy as np
from ..dataset import Transition
from .base import TransitionIterator
class RandomIterator(TransitionIterator):
_n_steps_per_epoch: int
def __init__(self, transitions: List[Transition], n_steps_per_epoch:
int, batch_size: int, n_steps: int=1, gamma: float=0.99, n_frames:
int=1, real_ratio: float=1.0, generated_maxlen: int=100000):
super().__init__(transitions=transitions, batch_size=batch_size,
n_steps=n_steps, gamma=gamma, n_frames=n_frames, real_ratio=
real_ratio, generated_maxlen=generated_maxlen)
self._n_steps_per_epoch = n_steps_per_epoch
def _reset(self) ->None:
pass
def _next(self) ->Transition:
index = cast(int, np.random.randint(len(self._transitions)))
transition = self._transitions[index]
return transition
def _has_finished(self) ->bool:
return self._count >= self._n_steps_per_epoch
def __len__(self) ->int:
return self._n_steps_per_epoch
<|reserved_special_token_1|>
from typing import List, cast
import numpy as np
from ..dataset import Transition
from .base import TransitionIterator
class RandomIterator(TransitionIterator):
_n_steps_per_epoch: int
def __init__(
self,
transitions: List[Transition],
n_steps_per_epoch: int,
batch_size: int,
n_steps: int = 1,
gamma: float = 0.99,
n_frames: int = 1,
real_ratio: float = 1.0,
generated_maxlen: int = 100000,
):
super().__init__(
transitions=transitions,
batch_size=batch_size,
n_steps=n_steps,
gamma=gamma,
n_frames=n_frames,
real_ratio=real_ratio,
generated_maxlen=generated_maxlen,
)
self._n_steps_per_epoch = n_steps_per_epoch
def _reset(self) -> None:
pass
def _next(self) -> Transition:
index = cast(int, np.random.randint(len(self._transitions)))
transition = self._transitions[index]
return transition
def _has_finished(self) -> bool:
return self._count >= self._n_steps_per_epoch
def __len__(self) -> int:
return self._n_steps_per_epoch
|
flexible
|
{
"blob_id": "3b9193fcd69b0387222feab96c50bf3617606cdd",
"index": 7329,
"step-1": "<mask token>\n\n\nclass RandomIterator(TransitionIterator):\n _n_steps_per_epoch: int\n <mask token>\n\n def _reset(self) ->None:\n pass\n <mask token>\n\n def _has_finished(self) ->bool:\n return self._count >= self._n_steps_per_epoch\n\n def __len__(self) ->int:\n return self._n_steps_per_epoch\n",
"step-2": "<mask token>\n\n\nclass RandomIterator(TransitionIterator):\n _n_steps_per_epoch: int\n <mask token>\n\n def _reset(self) ->None:\n pass\n\n def _next(self) ->Transition:\n index = cast(int, np.random.randint(len(self._transitions)))\n transition = self._transitions[index]\n return transition\n\n def _has_finished(self) ->bool:\n return self._count >= self._n_steps_per_epoch\n\n def __len__(self) ->int:\n return self._n_steps_per_epoch\n",
"step-3": "<mask token>\n\n\nclass RandomIterator(TransitionIterator):\n _n_steps_per_epoch: int\n\n def __init__(self, transitions: List[Transition], n_steps_per_epoch:\n int, batch_size: int, n_steps: int=1, gamma: float=0.99, n_frames:\n int=1, real_ratio: float=1.0, generated_maxlen: int=100000):\n super().__init__(transitions=transitions, batch_size=batch_size,\n n_steps=n_steps, gamma=gamma, n_frames=n_frames, real_ratio=\n real_ratio, generated_maxlen=generated_maxlen)\n self._n_steps_per_epoch = n_steps_per_epoch\n\n def _reset(self) ->None:\n pass\n\n def _next(self) ->Transition:\n index = cast(int, np.random.randint(len(self._transitions)))\n transition = self._transitions[index]\n return transition\n\n def _has_finished(self) ->bool:\n return self._count >= self._n_steps_per_epoch\n\n def __len__(self) ->int:\n return self._n_steps_per_epoch\n",
"step-4": "from typing import List, cast\nimport numpy as np\nfrom ..dataset import Transition\nfrom .base import TransitionIterator\n\n\nclass RandomIterator(TransitionIterator):\n _n_steps_per_epoch: int\n\n def __init__(self, transitions: List[Transition], n_steps_per_epoch:\n int, batch_size: int, n_steps: int=1, gamma: float=0.99, n_frames:\n int=1, real_ratio: float=1.0, generated_maxlen: int=100000):\n super().__init__(transitions=transitions, batch_size=batch_size,\n n_steps=n_steps, gamma=gamma, n_frames=n_frames, real_ratio=\n real_ratio, generated_maxlen=generated_maxlen)\n self._n_steps_per_epoch = n_steps_per_epoch\n\n def _reset(self) ->None:\n pass\n\n def _next(self) ->Transition:\n index = cast(int, np.random.randint(len(self._transitions)))\n transition = self._transitions[index]\n return transition\n\n def _has_finished(self) ->bool:\n return self._count >= self._n_steps_per_epoch\n\n def __len__(self) ->int:\n return self._n_steps_per_epoch\n",
"step-5": "from typing import List, cast\n\nimport numpy as np\n\nfrom ..dataset import Transition\nfrom .base import TransitionIterator\n\n\nclass RandomIterator(TransitionIterator):\n\n _n_steps_per_epoch: int\n\n def __init__(\n self,\n transitions: List[Transition],\n n_steps_per_epoch: int,\n batch_size: int,\n n_steps: int = 1,\n gamma: float = 0.99,\n n_frames: int = 1,\n real_ratio: float = 1.0,\n generated_maxlen: int = 100000,\n ):\n super().__init__(\n transitions=transitions,\n batch_size=batch_size,\n n_steps=n_steps,\n gamma=gamma,\n n_frames=n_frames,\n real_ratio=real_ratio,\n generated_maxlen=generated_maxlen,\n )\n self._n_steps_per_epoch = n_steps_per_epoch\n\n def _reset(self) -> None:\n pass\n\n def _next(self) -> Transition:\n index = cast(int, np.random.randint(len(self._transitions)))\n transition = self._transitions[index]\n return transition\n\n def _has_finished(self) -> bool:\n return self._count >= self._n_steps_per_epoch\n\n def __len__(self) -> int:\n return self._n_steps_per_epoch\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
setup(**config)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config = {'name': 'beziers', 'author': 'Simon Cozens', 'author_email':
'simon@simon-cozens.org', 'url':
'https://github.com/simoncozens/beziers.py', 'description':
'Bezier curve manipulation library', 'long_description': open(
'README.rst', 'r').read(), 'license': 'MIT', 'version': '0.5.0',
'install_requires': ['pyclipper'], 'classifiers': [
'Programming Language :: Python', 'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Development Status :: 4 - Beta'], 'packages': find_packages()}
if __name__ == '__main__':
setup(**config)
<|reserved_special_token_1|>
from setuptools import setup, find_packages
config = {'name': 'beziers', 'author': 'Simon Cozens', 'author_email':
'simon@simon-cozens.org', 'url':
'https://github.com/simoncozens/beziers.py', 'description':
'Bezier curve manipulation library', 'long_description': open(
'README.rst', 'r').read(), 'license': 'MIT', 'version': '0.5.0',
'install_requires': ['pyclipper'], 'classifiers': [
'Programming Language :: Python', 'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Development Status :: 4 - Beta'], 'packages': find_packages()}
if __name__ == '__main__':
setup(**config)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
config = {
'name': 'beziers',
'author': 'Simon Cozens',
'author_email': 'simon@simon-cozens.org',
'url': 'https://github.com/simoncozens/beziers.py',
'description': 'Bezier curve manipulation library',
'long_description': open('README.rst', 'r').read(),
'license': 'MIT',
'version': '0.5.0',
'install_requires': [
'pyclipper'
],
'classifiers': [
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta"
],
'packages': find_packages(),
}
if __name__ == '__main__':
setup(**config)
|
flexible
|
{
"blob_id": "98ddf0be2c38cd9b10dfa9cc09f53907b34c1287",
"index": 7728,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n setup(**config)\n",
"step-3": "<mask token>\nconfig = {'name': 'beziers', 'author': 'Simon Cozens', 'author_email':\n 'simon@simon-cozens.org', 'url':\n 'https://github.com/simoncozens/beziers.py', 'description':\n 'Bezier curve manipulation library', 'long_description': open(\n 'README.rst', 'r').read(), 'license': 'MIT', 'version': '0.5.0',\n 'install_requires': ['pyclipper'], 'classifiers': [\n 'Programming Language :: Python', 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 4 - Beta'], 'packages': find_packages()}\nif __name__ == '__main__':\n setup(**config)\n",
"step-4": "from setuptools import setup, find_packages\nconfig = {'name': 'beziers', 'author': 'Simon Cozens', 'author_email':\n 'simon@simon-cozens.org', 'url':\n 'https://github.com/simoncozens/beziers.py', 'description':\n 'Bezier curve manipulation library', 'long_description': open(\n 'README.rst', 'r').read(), 'license': 'MIT', 'version': '0.5.0',\n 'install_requires': ['pyclipper'], 'classifiers': [\n 'Programming Language :: Python', 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 4 - Beta'], 'packages': find_packages()}\nif __name__ == '__main__':\n setup(**config)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\n\nconfig = {\n 'name': 'beziers',\n 'author': 'Simon Cozens',\n 'author_email': 'simon@simon-cozens.org',\n 'url': 'https://github.com/simoncozens/beziers.py',\n 'description': 'Bezier curve manipulation library',\n 'long_description': open('README.rst', 'r').read(),\n 'license': 'MIT',\n 'version': '0.5.0',\n 'install_requires': [\n 'pyclipper'\n ],\n 'classifiers': [\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 4 - Beta\"\n\n ],\n 'packages': find_packages(),\n}\n\nif __name__ == '__main__':\n setup(**config)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='json_config', version='0.0.01', packages=['', 'test'], url='',
license='', author='craig.ferguson', author_email='', description=
'Simple Functional Config For Changing Environments')
<|reserved_special_token_1|>
from distutils.core import setup
setup(name='json_config', version='0.0.01', packages=['', 'test'], url='',
license='', author='craig.ferguson', author_email='', description=
'Simple Functional Config For Changing Environments')
|
flexible
|
{
"blob_id": "ee57e6a1ccbec93f3def8966f5621ea459f3d228",
"index": 6538,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='json_config', version='0.0.01', packages=['', 'test'], url='',\n license='', author='craig.ferguson', author_email='', description=\n 'Simple Functional Config For Changing Environments')\n",
"step-3": "from distutils.core import setup\nsetup(name='json_config', version='0.0.01', packages=['', 'test'], url='',\n license='', author='craig.ferguson', author_email='', description=\n 'Simple Functional Config For Changing Environments')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def LevelToIntensity(NoiseLevelIndB):
I0 = 10.0 ** -12
NoiseLevel = float(NoiseLevelIndB)
Intensity = I0 * 10 ** (NoiseLevel / 10)
return Intensity
def IntensityToLevel(Intensity):
I0 = 10.0 ** -12
Intensity = Intensity
NoiseLevelIndB = 10 * np.log10(Intensity / I0)
return NoiseLevelIndB
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def LevelToIntensity(NoiseLevelIndB):
I0 = 10.0 ** -12
NoiseLevel = float(NoiseLevelIndB)
Intensity = I0 * 10 ** (NoiseLevel / 10)
return Intensity
def IntensityToLevel(Intensity):
I0 = 10.0 ** -12
Intensity = Intensity
NoiseLevelIndB = 10 * np.log10(Intensity / I0)
return NoiseLevelIndB
<|reserved_special_token_0|>
plt.close('all')
<|reserved_special_token_0|>
cbar.set_label('Sound level in dB', rotation=270)
plt.xlabel('Distance (m)')
plt.ylabel('Spacing (m)')
plt.title(
"""Sound level in function of distance and spacing
with a velocity of 9.25 m/s for WM6"""
, fontweight='bold')
plt.minorticks_on()
plt.grid(b=True, which='major', linewidth=2)
plt.grid(b=True, which='minor')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def LevelToIntensity(NoiseLevelIndB):
I0 = 10.0 ** -12
NoiseLevel = float(NoiseLevelIndB)
Intensity = I0 * 10 ** (NoiseLevel / 10)
return Intensity
def IntensityToLevel(Intensity):
I0 = 10.0 ** -12
Intensity = Intensity
NoiseLevelIndB = 10 * np.log10(Intensity / I0)
return NoiseLevelIndB
coth = lambda x: (e ** x - e ** -x) / (e ** x - e ** -x)
plt.close('all')
SLHighway10 = 53.5
d1 = 10.0
b = np.arange(0.1, 150, 0.5)
d = np.arange(0.1, 150, 0.5)
b, d = np.meshgrid(b, d)
Vmax = 9.25
IntensityTurbine40cm = lambda V: 4 * 10 ** -6 * e ** (0.2216 * V)
IntensityIndividualTurbine = IntensityTurbine40cm(Vmax)
PowerIndividual = IntensityIndividualTurbine * pi * 0.16 * 4
SoundPowerHighway = LevelToIntensity(SLHighway10) * pi * d1 ** 2 * 4
Intensity = PowerIndividual / (4 * b * d) * coth(d / b * pi
) + SoundPowerHighway / (4 * pi * (d + d1) ** 2)
SL = IntensityToLevel(Intensity)
levels = [41.0, 47.0]
fig = plt.figure()
CS = plt.contourf(d, b, SL, levels, cmap=cm.Greys)
cbar = plt.colorbar()
cbar.set_label('Sound level in dB', rotation=270)
plt.xlabel('Distance (m)')
plt.ylabel('Spacing (m)')
plt.title(
"""Sound level in function of distance and spacing
with a velocity of 9.25 m/s for WM6"""
, fontweight='bold')
plt.minorticks_on()
plt.grid(b=True, which='major', linewidth=2)
plt.grid(b=True, which='minor')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from math import pi, e
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def LevelToIntensity(NoiseLevelIndB):
I0 = 10.0 ** -12
NoiseLevel = float(NoiseLevelIndB)
Intensity = I0 * 10 ** (NoiseLevel / 10)
return Intensity
def IntensityToLevel(Intensity):
I0 = 10.0 ** -12
Intensity = Intensity
NoiseLevelIndB = 10 * np.log10(Intensity / I0)
return NoiseLevelIndB
coth = lambda x: (e ** x - e ** -x) / (e ** x - e ** -x)
plt.close('all')
SLHighway10 = 53.5
d1 = 10.0
b = np.arange(0.1, 150, 0.5)
d = np.arange(0.1, 150, 0.5)
b, d = np.meshgrid(b, d)
Vmax = 9.25
IntensityTurbine40cm = lambda V: 4 * 10 ** -6 * e ** (0.2216 * V)
IntensityIndividualTurbine = IntensityTurbine40cm(Vmax)
PowerIndividual = IntensityIndividualTurbine * pi * 0.16 * 4
SoundPowerHighway = LevelToIntensity(SLHighway10) * pi * d1 ** 2 * 4
Intensity = PowerIndividual / (4 * b * d) * coth(d / b * pi
) + SoundPowerHighway / (4 * pi * (d + d1) ** 2)
SL = IntensityToLevel(Intensity)
levels = [41.0, 47.0]
fig = plt.figure()
CS = plt.contourf(d, b, SL, levels, cmap=cm.Greys)
cbar = plt.colorbar()
cbar.set_label('Sound level in dB', rotation=270)
plt.xlabel('Distance (m)')
plt.ylabel('Spacing (m)')
plt.title(
"""Sound level in function of distance and spacing
with a velocity of 9.25 m/s for WM6"""
, fontweight='bold')
plt.minorticks_on()
plt.grid(b=True, which='major', linewidth=2)
plt.grid(b=True, which='minor')
plt.show()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 13:34:46 2017
@author: Sven Geboers
"""
from math import pi,e
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def LevelToIntensity(NoiseLevelIndB):
I0 = 10.**(-12) #This is the treshold hearing intensity, matching 0 dB
NoiseLevel = float(NoiseLevelIndB)
Intensity = I0*10**(NoiseLevel/10)
return Intensity
def IntensityToLevel(Intensity):
I0 = 10.**(-12) #This is the treshold hearing intensity, matching 0 dB
Intensity = Intensity
NoiseLevelIndB = 10*np.log10(Intensity/I0)
return NoiseLevelIndB
#Definine the mathematical function coth(x)
coth = lambda x: (e**(x)-e**(-x))/(e**(x)-e**(-x)) #np.cosh(x)/np.sinh(x)
#Closes all previous plots so that we don't have to click them away manually
plt.close('all')
#Defining some constants:
SLHighway10 = 53.5 #dB, this is the sound level of a highway at 10 m distance
d1 = 10. #m, distance between the highway and the sound barrier
#Creating data mesh
b = np.arange(0.1, 150, 0.5)
d = np.arange(0.1, 150, 0.5)
b, d = np.meshgrid(b, d)
#Calculating maximum velocity and individual sound power
Vmax = 9.25 #m/s
IntensityTurbine40cm = lambda V: 4*10**(-6)*e**(0.2216*V)
IntensityIndividualTurbine = IntensityTurbine40cm(Vmax)
PowerIndividual = IntensityIndividualTurbine*pi*0.16 * 4
SoundPowerHighway = LevelToIntensity(SLHighway10)*pi*d1**2 * 4
#Calculating intensity and sound level
Intensity = PowerIndividual/(4*b*d)*coth(d/b*pi)+SoundPowerHighway/(4*pi*(d+d1)**2)
SL = IntensityToLevel(Intensity)
#Plots contour curve
levels = [41.,47.] #Contour levels that will be shown
fig = plt.figure()
CS = plt.contourf(d, b, SL, levels,cmap=cm.Greys)
cbar=plt.colorbar()
cbar.set_label('Sound level in dB', rotation=270)
plt.xlabel('Distance (m)')
plt.ylabel('Spacing (m)')
plt.title('Sound level in function of distance and spacing \n with a velocity of 9.25 m/s for WM6',fontweight='bold')
plt.minorticks_on()
plt.grid(b=True, which='major',linewidth=2)
plt.grid(b=True, which='minor')
plt.show()
|
flexible
|
{
"blob_id": "68a1d5a77abd19aece04bd560df121ceddccea42",
"index": 3179,
"step-1": "<mask token>\n\n\ndef LevelToIntensity(NoiseLevelIndB):\n I0 = 10.0 ** -12\n NoiseLevel = float(NoiseLevelIndB)\n Intensity = I0 * 10 ** (NoiseLevel / 10)\n return Intensity\n\n\ndef IntensityToLevel(Intensity):\n I0 = 10.0 ** -12\n Intensity = Intensity\n NoiseLevelIndB = 10 * np.log10(Intensity / I0)\n return NoiseLevelIndB\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef LevelToIntensity(NoiseLevelIndB):\n I0 = 10.0 ** -12\n NoiseLevel = float(NoiseLevelIndB)\n Intensity = I0 * 10 ** (NoiseLevel / 10)\n return Intensity\n\n\ndef IntensityToLevel(Intensity):\n I0 = 10.0 ** -12\n Intensity = Intensity\n NoiseLevelIndB = 10 * np.log10(Intensity / I0)\n return NoiseLevelIndB\n\n\n<mask token>\nplt.close('all')\n<mask token>\ncbar.set_label('Sound level in dB', rotation=270)\nplt.xlabel('Distance (m)')\nplt.ylabel('Spacing (m)')\nplt.title(\n \"\"\"Sound level in function of distance and spacing \n with a velocity of 9.25 m/s for WM6\"\"\"\n , fontweight='bold')\nplt.minorticks_on()\nplt.grid(b=True, which='major', linewidth=2)\nplt.grid(b=True, which='minor')\nplt.show()\n",
"step-3": "<mask token>\n\n\ndef LevelToIntensity(NoiseLevelIndB):\n I0 = 10.0 ** -12\n NoiseLevel = float(NoiseLevelIndB)\n Intensity = I0 * 10 ** (NoiseLevel / 10)\n return Intensity\n\n\ndef IntensityToLevel(Intensity):\n I0 = 10.0 ** -12\n Intensity = Intensity\n NoiseLevelIndB = 10 * np.log10(Intensity / I0)\n return NoiseLevelIndB\n\n\ncoth = lambda x: (e ** x - e ** -x) / (e ** x - e ** -x)\nplt.close('all')\nSLHighway10 = 53.5\nd1 = 10.0\nb = np.arange(0.1, 150, 0.5)\nd = np.arange(0.1, 150, 0.5)\nb, d = np.meshgrid(b, d)\nVmax = 9.25\nIntensityTurbine40cm = lambda V: 4 * 10 ** -6 * e ** (0.2216 * V)\nIntensityIndividualTurbine = IntensityTurbine40cm(Vmax)\nPowerIndividual = IntensityIndividualTurbine * pi * 0.16 * 4\nSoundPowerHighway = LevelToIntensity(SLHighway10) * pi * d1 ** 2 * 4\nIntensity = PowerIndividual / (4 * b * d) * coth(d / b * pi\n ) + SoundPowerHighway / (4 * pi * (d + d1) ** 2)\nSL = IntensityToLevel(Intensity)\nlevels = [41.0, 47.0]\nfig = plt.figure()\nCS = plt.contourf(d, b, SL, levels, cmap=cm.Greys)\ncbar = plt.colorbar()\ncbar.set_label('Sound level in dB', rotation=270)\nplt.xlabel('Distance (m)')\nplt.ylabel('Spacing (m)')\nplt.title(\n \"\"\"Sound level in function of distance and spacing \n with a velocity of 9.25 m/s for WM6\"\"\"\n , fontweight='bold')\nplt.minorticks_on()\nplt.grid(b=True, which='major', linewidth=2)\nplt.grid(b=True, which='minor')\nplt.show()\n",
"step-4": "<mask token>\nfrom math import pi, e\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\n\ndef LevelToIntensity(NoiseLevelIndB):\n I0 = 10.0 ** -12\n NoiseLevel = float(NoiseLevelIndB)\n Intensity = I0 * 10 ** (NoiseLevel / 10)\n return Intensity\n\n\ndef IntensityToLevel(Intensity):\n I0 = 10.0 ** -12\n Intensity = Intensity\n NoiseLevelIndB = 10 * np.log10(Intensity / I0)\n return NoiseLevelIndB\n\n\ncoth = lambda x: (e ** x - e ** -x) / (e ** x - e ** -x)\nplt.close('all')\nSLHighway10 = 53.5\nd1 = 10.0\nb = np.arange(0.1, 150, 0.5)\nd = np.arange(0.1, 150, 0.5)\nb, d = np.meshgrid(b, d)\nVmax = 9.25\nIntensityTurbine40cm = lambda V: 4 * 10 ** -6 * e ** (0.2216 * V)\nIntensityIndividualTurbine = IntensityTurbine40cm(Vmax)\nPowerIndividual = IntensityIndividualTurbine * pi * 0.16 * 4\nSoundPowerHighway = LevelToIntensity(SLHighway10) * pi * d1 ** 2 * 4\nIntensity = PowerIndividual / (4 * b * d) * coth(d / b * pi\n ) + SoundPowerHighway / (4 * pi * (d + d1) ** 2)\nSL = IntensityToLevel(Intensity)\nlevels = [41.0, 47.0]\nfig = plt.figure()\nCS = plt.contourf(d, b, SL, levels, cmap=cm.Greys)\ncbar = plt.colorbar()\ncbar.set_label('Sound level in dB', rotation=270)\nplt.xlabel('Distance (m)')\nplt.ylabel('Spacing (m)')\nplt.title(\n \"\"\"Sound level in function of distance and spacing \n with a velocity of 9.25 m/s for WM6\"\"\"\n , fontweight='bold')\nplt.minorticks_on()\nplt.grid(b=True, which='major', linewidth=2)\nplt.grid(b=True, which='minor')\nplt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 25 13:34:46 2017\r\n\r\n@author: Sven Geboers\r\n\"\"\"\r\n\r\nfrom math import pi,e\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import cm\r\n\r\ndef LevelToIntensity(NoiseLevelIndB):\r\n I0 = 10.**(-12) #This is the treshold hearing intensity, matching 0 dB\r\n NoiseLevel = float(NoiseLevelIndB)\r\n Intensity = I0*10**(NoiseLevel/10)\r\n return Intensity\r\n \r\ndef IntensityToLevel(Intensity):\r\n I0 = 10.**(-12) #This is the treshold hearing intensity, matching 0 dB\r\n Intensity = Intensity\r\n NoiseLevelIndB = 10*np.log10(Intensity/I0)\r\n return NoiseLevelIndB\r\n \r\n#Definine the mathematical function coth(x)\r\ncoth = lambda x: (e**(x)-e**(-x))/(e**(x)-e**(-x)) #np.cosh(x)/np.sinh(x)\r\n\r\n#Closes all previous plots so that we don't have to click them away manually\r\nplt.close('all')\r\n\r\n#Defining some constants:\r\nSLHighway10 = 53.5 #dB, this is the sound level of a highway at 10 m distance\r\nd1 = 10. #m, distance between the highway and the sound barrier\r\n\r\n#Creating data mesh \r\nb = np.arange(0.1, 150, 0.5)\r\nd = np.arange(0.1, 150, 0.5)\r\nb, d = np.meshgrid(b, d)\r\n\r\n#Calculating maximum velocity and individual sound power\r\nVmax = 9.25 #m/s\r\nIntensityTurbine40cm = lambda V: 4*10**(-6)*e**(0.2216*V)\r\nIntensityIndividualTurbine = IntensityTurbine40cm(Vmax)\r\nPowerIndividual = IntensityIndividualTurbine*pi*0.16 * 4\r\nSoundPowerHighway = LevelToIntensity(SLHighway10)*pi*d1**2 * 4\r\n\r\n#Calculating intensity and sound level\r\nIntensity = PowerIndividual/(4*b*d)*coth(d/b*pi)+SoundPowerHighway/(4*pi*(d+d1)**2)\r\nSL = IntensityToLevel(Intensity)\r\n\r\n#Plots contour curve \r\nlevels = [41.,47.] 
#Contour levels that will be shown\r\nfig = plt.figure()\r\nCS = plt.contourf(d, b, SL, levels,cmap=cm.Greys)\r\ncbar=plt.colorbar()\r\ncbar.set_label('Sound level in dB', rotation=270)\r\nplt.xlabel('Distance (m)')\r\nplt.ylabel('Spacing (m)')\r\nplt.title('Sound level in function of distance and spacing \\n with a velocity of 9.25 m/s for WM6',fontweight='bold')\r\nplt.minorticks_on()\r\nplt.grid(b=True, which='major',linewidth=2)\r\nplt.grid(b=True, which='minor') \r\nplt.show()\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
scaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))
<|reserved_special_token_0|>
for k, v in ISOELECTRIC_POINTS.items():
ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
<|reserved_special_token_0|>
scaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))
<|reserved_special_token_0|>
for k, v in MOLECULAR_WEIGHTS.items():
MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
BACKBONE_ATOMS = ['N', 'CA', 'C', 'O']
AMINO_ACIDS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z']
BOND_TYPES = ['hydrophobic', 'disulfide', 'hbond', 'ionic', 'aromatic',
'aromatic_sulphur', 'cation_pi', 'backbone', 'delaunay']
RESI_NAMES = ['ALA', 'ASX', 'CYS', 'ASP', 'GLU', 'PHE', 'GLY', 'HIS', 'ILE',
'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL',
'TRP', 'TYR', 'GLX', 'UNK']
HYDROPHOBIC_RESIS = ['ALA', 'VAL', 'LEU', 'ILE', 'MET', 'PHE', 'TRP', 'PRO',
'TYR']
DISULFIDE_RESIS = ['CYS']
DISULFIDE_ATOMS = ['SG']
IONIC_RESIS = ['ARG', 'LYS', 'HIS', 'ASP', 'GLU']
POS_AA = ['HIS', 'LYS', 'ARG']
NEG_AA = ['GLU', 'ASP']
AA_RING_ATOMS = dict()
AA_RING_ATOMS['PHE'] = ['CG', 'CD', 'CE', 'CZ']
AA_RING_ATOMS['TRP'] = ['CD', 'CE', 'CH', 'CZ']
AA_RING_ATOMS['HIS'] = ['CG', 'CD', 'CE', 'ND', 'NE']
AA_RING_ATOMS['TYR'] = ['CG', 'CD', 'CE', 'CZ']
AROMATIC_RESIS = ['PHE', 'TRP', 'HIS', 'TYR']
CATION_PI_RESIS = ['LYS', 'ARG', 'PHE', 'TYR', 'TRP']
CATION_RESIS = ['LYS', 'ARG']
PI_RESIS = ['PHE', 'TYR', 'TRP']
SULPHUR_RESIS = ['MET', 'CYS']
ISOELECTRIC_POINTS = {'ALA': 6.11, 'ARG': 10.76, 'ASN': 10.76, 'ASP': 2.98,
'CYS': 5.02, 'GLU': 3.08, 'GLN': 5.65, 'GLY': 6.06, 'HIS': 7.64, 'ILE':
6.04, 'LEU': 6.04, 'LYS': 9.74, 'MET': 5.74, 'PHE': 5.91, 'PRO': 6.3,
'SER': 5.68, 'THR': 5.6, 'TRP': 5.88, 'TYR': 5.63, 'VAL': 6.02, 'UNK':
7.0, 'ASX': 6.87, 'GLX': 4.35}
scaler = StandardScaler()
scaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))
ISOELECTRIC_POINTS_STD = dict()
for k, v in ISOELECTRIC_POINTS.items():
ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
MOLECULAR_WEIGHTS = {'ALA': 89.0935, 'ARG': 174.2017, 'ASN': 132.1184,
'ASP': 133.1032, 'CYS': 121.159, 'GLU': 147.1299, 'GLN': 146.1451,
'GLY': 75.0669, 'HIS': 155.1552, 'ILE': 131.1736, 'LEU': 131.1736,
'LYS': 146.1882, 'MET': 149.2124, 'PHE': 165.19, 'PRO': 115.131, 'SER':
105.093, 'THR': 119.1197, 'TRP': 204.2262, 'TYR': 181.1894, 'VAL':
117.1469, 'UNK': 137.1484, 'ASX': 132.6108, 'GLX': 146.6375}
MOLECULAR_WEIGHTS_STD = dict()
scaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))
MOLECULAR_WEIGHTS_STD = dict()
for k, v in MOLECULAR_WEIGHTS.items():
MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
from sklearn.preprocessing import StandardScaler
BACKBONE_ATOMS = ['N', 'CA', 'C', 'O']
AMINO_ACIDS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z']
BOND_TYPES = ['hydrophobic', 'disulfide', 'hbond', 'ionic', 'aromatic',
'aromatic_sulphur', 'cation_pi', 'backbone', 'delaunay']
RESI_NAMES = ['ALA', 'ASX', 'CYS', 'ASP', 'GLU', 'PHE', 'GLY', 'HIS', 'ILE',
'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL',
'TRP', 'TYR', 'GLX', 'UNK']
HYDROPHOBIC_RESIS = ['ALA', 'VAL', 'LEU', 'ILE', 'MET', 'PHE', 'TRP', 'PRO',
'TYR']
DISULFIDE_RESIS = ['CYS']
DISULFIDE_ATOMS = ['SG']
IONIC_RESIS = ['ARG', 'LYS', 'HIS', 'ASP', 'GLU']
POS_AA = ['HIS', 'LYS', 'ARG']
NEG_AA = ['GLU', 'ASP']
AA_RING_ATOMS = dict()
AA_RING_ATOMS['PHE'] = ['CG', 'CD', 'CE', 'CZ']
AA_RING_ATOMS['TRP'] = ['CD', 'CE', 'CH', 'CZ']
AA_RING_ATOMS['HIS'] = ['CG', 'CD', 'CE', 'ND', 'NE']
AA_RING_ATOMS['TYR'] = ['CG', 'CD', 'CE', 'CZ']
AROMATIC_RESIS = ['PHE', 'TRP', 'HIS', 'TYR']
CATION_PI_RESIS = ['LYS', 'ARG', 'PHE', 'TYR', 'TRP']
CATION_RESIS = ['LYS', 'ARG']
PI_RESIS = ['PHE', 'TYR', 'TRP']
SULPHUR_RESIS = ['MET', 'CYS']
ISOELECTRIC_POINTS = {'ALA': 6.11, 'ARG': 10.76, 'ASN': 10.76, 'ASP': 2.98,
'CYS': 5.02, 'GLU': 3.08, 'GLN': 5.65, 'GLY': 6.06, 'HIS': 7.64, 'ILE':
6.04, 'LEU': 6.04, 'LYS': 9.74, 'MET': 5.74, 'PHE': 5.91, 'PRO': 6.3,
'SER': 5.68, 'THR': 5.6, 'TRP': 5.88, 'TYR': 5.63, 'VAL': 6.02, 'UNK':
7.0, 'ASX': 6.87, 'GLX': 4.35}
scaler = StandardScaler()
scaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))
ISOELECTRIC_POINTS_STD = dict()
for k, v in ISOELECTRIC_POINTS.items():
ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
MOLECULAR_WEIGHTS = {'ALA': 89.0935, 'ARG': 174.2017, 'ASN': 132.1184,
'ASP': 133.1032, 'CYS': 121.159, 'GLU': 147.1299, 'GLN': 146.1451,
'GLY': 75.0669, 'HIS': 155.1552, 'ILE': 131.1736, 'LEU': 131.1736,
'LYS': 146.1882, 'MET': 149.2124, 'PHE': 165.19, 'PRO': 115.131, 'SER':
105.093, 'THR': 119.1197, 'TRP': 204.2262, 'TYR': 181.1894, 'VAL':
117.1469, 'UNK': 137.1484, 'ASX': 132.6108, 'GLX': 146.6375}
MOLECULAR_WEIGHTS_STD = dict()
scaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))
MOLECULAR_WEIGHTS_STD = dict()
for k, v in MOLECULAR_WEIGHTS.items():
MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
<|reserved_special_token_1|>
"""
Author: Eric J. Ma
Purpose: This is a set of utility variables and functions that can be used
across the PIN project.
"""
import numpy as np
from sklearn.preprocessing import StandardScaler
BACKBONE_ATOMS = ["N", "CA", "C", "O"]
AMINO_ACIDS = [
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"P",
"Q",
"R",
"S",
"T",
"V",
"W",
"X",
"Y",
"Z",
]
BOND_TYPES = [
"hydrophobic",
"disulfide",
"hbond",
"ionic",
"aromatic",
"aromatic_sulphur",
"cation_pi",
"backbone",
"delaunay",
]
RESI_NAMES = [
"ALA",
"ASX",
"CYS",
"ASP",
"GLU",
"PHE",
"GLY",
"HIS",
"ILE",
"LYS",
"LEU",
"MET",
"ASN",
"PRO",
"GLN",
"ARG",
"SER",
"THR",
"VAL",
"TRP",
"TYR",
"GLX",
"UNK",
]
HYDROPHOBIC_RESIS = [
"ALA",
"VAL",
"LEU",
"ILE",
"MET",
"PHE",
"TRP",
"PRO",
"TYR",
]
DISULFIDE_RESIS = ["CYS"]
DISULFIDE_ATOMS = ["SG"]
IONIC_RESIS = ["ARG", "LYS", "HIS", "ASP", "GLU"]
POS_AA = ["HIS", "LYS", "ARG"]
NEG_AA = ["GLU", "ASP"]
AA_RING_ATOMS = dict()
AA_RING_ATOMS["PHE"] = ["CG", "CD", "CE", "CZ"]
AA_RING_ATOMS["TRP"] = ["CD", "CE", "CH", "CZ"]
AA_RING_ATOMS["HIS"] = ["CG", "CD", "CE", "ND", "NE"]
AA_RING_ATOMS["TYR"] = ["CG", "CD", "CE", "CZ"]
AROMATIC_RESIS = ["PHE", "TRP", "HIS", "TYR"]
CATION_PI_RESIS = ["LYS", "ARG", "PHE", "TYR", "TRP"]
CATION_RESIS = ["LYS", "ARG"]
PI_RESIS = ["PHE", "TYR", "TRP"]
SULPHUR_RESIS = ["MET", "CYS"]
ISOELECTRIC_POINTS = {
"ALA": 6.11,
"ARG": 10.76,
"ASN": 10.76,
"ASP": 2.98,
"CYS": 5.02,
"GLU": 3.08,
"GLN": 5.65,
"GLY": 6.06,
"HIS": 7.64,
"ILE": 6.04,
"LEU": 6.04,
"LYS": 9.74,
"MET": 5.74,
"PHE": 5.91,
"PRO": 6.30,
"SER": 5.68,
"THR": 5.60,
"TRP": 5.88,
"TYR": 5.63,
"VAL": 6.02,
"UNK": 7.00, # unknown so assign neutral
"ASX": 6.87, # the average of D and N
"GLX": 4.35, # the average of E and Q
}
scaler = StandardScaler()
scaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))
ISOELECTRIC_POINTS_STD = dict()
for k, v in ISOELECTRIC_POINTS.items():
ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
MOLECULAR_WEIGHTS = {
"ALA": 89.0935,
"ARG": 174.2017,
"ASN": 132.1184,
"ASP": 133.1032,
"CYS": 121.1590,
"GLU": 147.1299,
"GLN": 146.1451,
"GLY": 75.0669,
"HIS": 155.1552,
"ILE": 131.1736,
"LEU": 131.1736,
"LYS": 146.1882,
"MET": 149.2124,
"PHE": 165.1900,
"PRO": 115.1310,
"SER": 105.0930,
"THR": 119.1197,
"TRP": 204.2262,
"TYR": 181.1894,
"VAL": 117.1469,
"UNK": 137.1484, # unknown, therefore assign average of knowns
"ASX": 132.6108, # the average of D and N
"GLX": 146.6375, # the average of E and Q
}
MOLECULAR_WEIGHTS_STD = dict()
scaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))
MOLECULAR_WEIGHTS_STD = dict()
for k, v in MOLECULAR_WEIGHTS.items():
MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
|
flexible
|
{
"blob_id": "330df4f194deec521f7db0389f88171d9e2aac40",
"index": 2384,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nscaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))\n<mask token>\nfor k, v in ISOELECTRIC_POINTS.items():\n ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\n<mask token>\nscaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))\n<mask token>\nfor k, v in MOLECULAR_WEIGHTS.items():\n MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\n",
"step-3": "<mask token>\nBACKBONE_ATOMS = ['N', 'CA', 'C', 'O']\nAMINO_ACIDS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',\n 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z']\nBOND_TYPES = ['hydrophobic', 'disulfide', 'hbond', 'ionic', 'aromatic',\n 'aromatic_sulphur', 'cation_pi', 'backbone', 'delaunay']\nRESI_NAMES = ['ALA', 'ASX', 'CYS', 'ASP', 'GLU', 'PHE', 'GLY', 'HIS', 'ILE',\n 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL',\n 'TRP', 'TYR', 'GLX', 'UNK']\nHYDROPHOBIC_RESIS = ['ALA', 'VAL', 'LEU', 'ILE', 'MET', 'PHE', 'TRP', 'PRO',\n 'TYR']\nDISULFIDE_RESIS = ['CYS']\nDISULFIDE_ATOMS = ['SG']\nIONIC_RESIS = ['ARG', 'LYS', 'HIS', 'ASP', 'GLU']\nPOS_AA = ['HIS', 'LYS', 'ARG']\nNEG_AA = ['GLU', 'ASP']\nAA_RING_ATOMS = dict()\nAA_RING_ATOMS['PHE'] = ['CG', 'CD', 'CE', 'CZ']\nAA_RING_ATOMS['TRP'] = ['CD', 'CE', 'CH', 'CZ']\nAA_RING_ATOMS['HIS'] = ['CG', 'CD', 'CE', 'ND', 'NE']\nAA_RING_ATOMS['TYR'] = ['CG', 'CD', 'CE', 'CZ']\nAROMATIC_RESIS = ['PHE', 'TRP', 'HIS', 'TYR']\nCATION_PI_RESIS = ['LYS', 'ARG', 'PHE', 'TYR', 'TRP']\nCATION_RESIS = ['LYS', 'ARG']\nPI_RESIS = ['PHE', 'TYR', 'TRP']\nSULPHUR_RESIS = ['MET', 'CYS']\nISOELECTRIC_POINTS = {'ALA': 6.11, 'ARG': 10.76, 'ASN': 10.76, 'ASP': 2.98,\n 'CYS': 5.02, 'GLU': 3.08, 'GLN': 5.65, 'GLY': 6.06, 'HIS': 7.64, 'ILE':\n 6.04, 'LEU': 6.04, 'LYS': 9.74, 'MET': 5.74, 'PHE': 5.91, 'PRO': 6.3,\n 'SER': 5.68, 'THR': 5.6, 'TRP': 5.88, 'TYR': 5.63, 'VAL': 6.02, 'UNK': \n 7.0, 'ASX': 6.87, 'GLX': 4.35}\nscaler = StandardScaler()\nscaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))\nISOELECTRIC_POINTS_STD = dict()\nfor k, v in ISOELECTRIC_POINTS.items():\n ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\nMOLECULAR_WEIGHTS = {'ALA': 89.0935, 'ARG': 174.2017, 'ASN': 132.1184,\n 'ASP': 133.1032, 'CYS': 121.159, 'GLU': 147.1299, 'GLN': 146.1451,\n 'GLY': 75.0669, 'HIS': 155.1552, 'ILE': 131.1736, 'LEU': 131.1736,\n 'LYS': 
146.1882, 'MET': 149.2124, 'PHE': 165.19, 'PRO': 115.131, 'SER':\n 105.093, 'THR': 119.1197, 'TRP': 204.2262, 'TYR': 181.1894, 'VAL': \n 117.1469, 'UNK': 137.1484, 'ASX': 132.6108, 'GLX': 146.6375}\nMOLECULAR_WEIGHTS_STD = dict()\nscaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))\nMOLECULAR_WEIGHTS_STD = dict()\nfor k, v in MOLECULAR_WEIGHTS.items():\n MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\n",
"step-4": "<mask token>\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nBACKBONE_ATOMS = ['N', 'CA', 'C', 'O']\nAMINO_ACIDS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',\n 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z']\nBOND_TYPES = ['hydrophobic', 'disulfide', 'hbond', 'ionic', 'aromatic',\n 'aromatic_sulphur', 'cation_pi', 'backbone', 'delaunay']\nRESI_NAMES = ['ALA', 'ASX', 'CYS', 'ASP', 'GLU', 'PHE', 'GLY', 'HIS', 'ILE',\n 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL',\n 'TRP', 'TYR', 'GLX', 'UNK']\nHYDROPHOBIC_RESIS = ['ALA', 'VAL', 'LEU', 'ILE', 'MET', 'PHE', 'TRP', 'PRO',\n 'TYR']\nDISULFIDE_RESIS = ['CYS']\nDISULFIDE_ATOMS = ['SG']\nIONIC_RESIS = ['ARG', 'LYS', 'HIS', 'ASP', 'GLU']\nPOS_AA = ['HIS', 'LYS', 'ARG']\nNEG_AA = ['GLU', 'ASP']\nAA_RING_ATOMS = dict()\nAA_RING_ATOMS['PHE'] = ['CG', 'CD', 'CE', 'CZ']\nAA_RING_ATOMS['TRP'] = ['CD', 'CE', 'CH', 'CZ']\nAA_RING_ATOMS['HIS'] = ['CG', 'CD', 'CE', 'ND', 'NE']\nAA_RING_ATOMS['TYR'] = ['CG', 'CD', 'CE', 'CZ']\nAROMATIC_RESIS = ['PHE', 'TRP', 'HIS', 'TYR']\nCATION_PI_RESIS = ['LYS', 'ARG', 'PHE', 'TYR', 'TRP']\nCATION_RESIS = ['LYS', 'ARG']\nPI_RESIS = ['PHE', 'TYR', 'TRP']\nSULPHUR_RESIS = ['MET', 'CYS']\nISOELECTRIC_POINTS = {'ALA': 6.11, 'ARG': 10.76, 'ASN': 10.76, 'ASP': 2.98,\n 'CYS': 5.02, 'GLU': 3.08, 'GLN': 5.65, 'GLY': 6.06, 'HIS': 7.64, 'ILE':\n 6.04, 'LEU': 6.04, 'LYS': 9.74, 'MET': 5.74, 'PHE': 5.91, 'PRO': 6.3,\n 'SER': 5.68, 'THR': 5.6, 'TRP': 5.88, 'TYR': 5.63, 'VAL': 6.02, 'UNK': \n 7.0, 'ASX': 6.87, 'GLX': 4.35}\nscaler = StandardScaler()\nscaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))\nISOELECTRIC_POINTS_STD = dict()\nfor k, v in ISOELECTRIC_POINTS.items():\n ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\nMOLECULAR_WEIGHTS = {'ALA': 89.0935, 'ARG': 174.2017, 'ASN': 132.1184,\n 'ASP': 133.1032, 'CYS': 121.159, 'GLU': 147.1299, 'GLN': 146.1451,\n 'GLY': 
75.0669, 'HIS': 155.1552, 'ILE': 131.1736, 'LEU': 131.1736,\n 'LYS': 146.1882, 'MET': 149.2124, 'PHE': 165.19, 'PRO': 115.131, 'SER':\n 105.093, 'THR': 119.1197, 'TRP': 204.2262, 'TYR': 181.1894, 'VAL': \n 117.1469, 'UNK': 137.1484, 'ASX': 132.6108, 'GLX': 146.6375}\nMOLECULAR_WEIGHTS_STD = dict()\nscaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))\nMOLECULAR_WEIGHTS_STD = dict()\nfor k, v in MOLECULAR_WEIGHTS.items():\n MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\n",
"step-5": "\"\"\"\nAuthor: Eric J. Ma\n\nPurpose: This is a set of utility variables and functions that can be used\nacross the PIN project.\n\"\"\"\n\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\nBACKBONE_ATOMS = [\"N\", \"CA\", \"C\", \"O\"]\n\nAMINO_ACIDS = [\n \"A\",\n \"B\",\n \"C\",\n \"D\",\n \"E\",\n \"F\",\n \"G\",\n \"H\",\n \"I\",\n \"J\",\n \"K\",\n \"L\",\n \"M\",\n \"N\",\n \"P\",\n \"Q\",\n \"R\",\n \"S\",\n \"T\",\n \"V\",\n \"W\",\n \"X\",\n \"Y\",\n \"Z\",\n]\n\nBOND_TYPES = [\n \"hydrophobic\",\n \"disulfide\",\n \"hbond\",\n \"ionic\",\n \"aromatic\",\n \"aromatic_sulphur\",\n \"cation_pi\",\n \"backbone\",\n \"delaunay\",\n]\n\nRESI_NAMES = [\n \"ALA\",\n \"ASX\",\n \"CYS\",\n \"ASP\",\n \"GLU\",\n \"PHE\",\n \"GLY\",\n \"HIS\",\n \"ILE\",\n \"LYS\",\n \"LEU\",\n \"MET\",\n \"ASN\",\n \"PRO\",\n \"GLN\",\n \"ARG\",\n \"SER\",\n \"THR\",\n \"VAL\",\n \"TRP\",\n \"TYR\",\n \"GLX\",\n \"UNK\",\n]\n\nHYDROPHOBIC_RESIS = [\n \"ALA\",\n \"VAL\",\n \"LEU\",\n \"ILE\",\n \"MET\",\n \"PHE\",\n \"TRP\",\n \"PRO\",\n \"TYR\",\n]\n\nDISULFIDE_RESIS = [\"CYS\"]\n\nDISULFIDE_ATOMS = [\"SG\"]\n\nIONIC_RESIS = [\"ARG\", \"LYS\", \"HIS\", \"ASP\", \"GLU\"]\n\nPOS_AA = [\"HIS\", \"LYS\", \"ARG\"]\n\nNEG_AA = [\"GLU\", \"ASP\"]\n\nAA_RING_ATOMS = dict()\nAA_RING_ATOMS[\"PHE\"] = [\"CG\", \"CD\", \"CE\", \"CZ\"]\nAA_RING_ATOMS[\"TRP\"] = [\"CD\", \"CE\", \"CH\", \"CZ\"]\nAA_RING_ATOMS[\"HIS\"] = [\"CG\", \"CD\", \"CE\", \"ND\", \"NE\"]\nAA_RING_ATOMS[\"TYR\"] = [\"CG\", \"CD\", \"CE\", \"CZ\"]\n\nAROMATIC_RESIS = [\"PHE\", \"TRP\", \"HIS\", \"TYR\"]\n\nCATION_PI_RESIS = [\"LYS\", \"ARG\", \"PHE\", \"TYR\", \"TRP\"]\n\nCATION_RESIS = [\"LYS\", \"ARG\"]\n\nPI_RESIS = [\"PHE\", \"TYR\", \"TRP\"]\n\nSULPHUR_RESIS = [\"MET\", \"CYS\"]\n\nISOELECTRIC_POINTS = {\n \"ALA\": 6.11,\n \"ARG\": 10.76,\n \"ASN\": 10.76,\n \"ASP\": 2.98,\n \"CYS\": 5.02,\n \"GLU\": 3.08,\n \"GLN\": 5.65,\n \"GLY\": 6.06,\n \"HIS\": 7.64,\n \"ILE\": 6.04,\n \"LEU\": 
6.04,\n \"LYS\": 9.74,\n \"MET\": 5.74,\n \"PHE\": 5.91,\n \"PRO\": 6.30,\n \"SER\": 5.68,\n \"THR\": 5.60,\n \"TRP\": 5.88,\n \"TYR\": 5.63,\n \"VAL\": 6.02,\n \"UNK\": 7.00, # unknown so assign neutral\n \"ASX\": 6.87, # the average of D and N\n \"GLX\": 4.35, # the average of E and Q\n}\n\nscaler = StandardScaler()\nscaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))\n\nISOELECTRIC_POINTS_STD = dict()\nfor k, v in ISOELECTRIC_POINTS.items():\n ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\n\nMOLECULAR_WEIGHTS = {\n \"ALA\": 89.0935,\n \"ARG\": 174.2017,\n \"ASN\": 132.1184,\n \"ASP\": 133.1032,\n \"CYS\": 121.1590,\n \"GLU\": 147.1299,\n \"GLN\": 146.1451,\n \"GLY\": 75.0669,\n \"HIS\": 155.1552,\n \"ILE\": 131.1736,\n \"LEU\": 131.1736,\n \"LYS\": 146.1882,\n \"MET\": 149.2124,\n \"PHE\": 165.1900,\n \"PRO\": 115.1310,\n \"SER\": 105.0930,\n \"THR\": 119.1197,\n \"TRP\": 204.2262,\n \"TYR\": 181.1894,\n \"VAL\": 117.1469,\n \"UNK\": 137.1484, # unknown, therefore assign average of knowns\n \"ASX\": 132.6108, # the average of D and N\n \"GLX\": 146.6375, # the average of E and Q\n}\n\nMOLECULAR_WEIGHTS_STD = dict()\n\nscaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))\nMOLECULAR_WEIGHTS_STD = dict()\nfor k, v in MOLECULAR_WEIGHTS.items():\n MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import subprocess
import re
# import fcntl
# path_ffmpeg =
path_ffmpeg = r'C:\work\ffmpeg\ffmpeg-3.4.2-win64-static\bin\ffmpeg.exe'
dir_ts_files = r'E:\ts'
dir_output = r'E:\hb'
path_lock = r'C:\work\ffmpeg\encode.lock'
def get_work_base(f_base):
re_num = re.compile(r'(\d+)\-.+')
r = re_num.search(f_base)
res = ''
if r:
print(r.group(1))
res = r.group(1)
else:
print('error... cant find f_base.')
return res
def chk_tmp_file(f_tmp):
pass
def get_file_base(path):
fname = os.path.basename(path)
return fname.split('.ts')[0]
def _exec_transcode(path):
f_base = get_file_base(path)
work_base = get_work_base(f_base)
f_in = os.path.join(dir_ts_files, f_base + '.ts')
f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')
f_out = os.path.join(dir_output, f_base + '.m4v')
#opt = '-i %(f_in)s -vf scale=720:-1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit %(f_tmp)s'
#opt = '-i %(f_in)s -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -g 1 -c:a aac -b:a 96k -filter_complex channelsplit %(f_tmp)s'
opt = '-i "%(f_in)s" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit "%(f_tmp)s"'
#opt = '-i %(f_in)s -vf scale=840:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 26 -c:a aac -b:a 96k -filter_complex channelsplit %(f_tmp)s'
enc_args = opt % vars()
if os.path.exists(f_tmp):
os.remove(f_tmp)
# cmd = ' '.join([ffmpeg, f_in, f_out])
cmd = path_ffmpeg + ' ' + enc_args
print(cmd)
# res = subprocess.run([ffmpeg, enc_args], stdout=subprocess.PIPE)
res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
#return res
if res and res.returncode == 0:
print('Well done. encoding .....')
print('rename')
os.rename(f_tmp, f_out)
else:
print('failed ..... remove tmp-file')
os.remove(f_tmp)
return
def del_files():
pass
def itr_ts_files():
# [print(path) for path in os.listdir(dir_ts) if path.endswith('.ts')]
for path in os.listdir(dir_ts_files):
if path.endswith('.ts'):
yield path
def transcode():
# 複数プロセス起動を防ぐためファイルロックを利用。x-modeでopen (for Windows)
#os.remove(path_lock)
#return
try:
with open(path_lock, 'x') as fl:
for path in itr_ts_files():
print('\nStart transcode [%s]' % path)
_exec_transcode(path)
os.remove(path_lock)
print('Finish transcode [%s]' % path)
except FileExistsError as e:
# print('ロックを獲得できませんでした... エンコード中')
print(e)
return
# TODO
# tmp-file をロック。できなければ終了。
# f_out があれば、終了。
# f_in を削除 (正常終了の場合)
# ts ファイルのステータスチェック(録画済か録画中か?)
# ts ファイルをみつけて、エンコード開始
if __name__ == '__main__':
transcode()
|
normal
|
{
"blob_id": "502e2d2222863236a42512ffc98c2cc9deaf454f",
"index": 7058,
"step-1": "<mask token>\n\n\ndef chk_tmp_file(f_tmp):\n pass\n\n\ndef get_file_base(path):\n fname = os.path.basename(path)\n return fname.split('.ts')[0]\n\n\ndef _exec_transcode(path):\n f_base = get_file_base(path)\n work_base = get_work_base(f_base)\n f_in = os.path.join(dir_ts_files, f_base + '.ts')\n f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')\n f_out = os.path.join(dir_output, f_base + '.m4v')\n opt = (\n '-i \"%(f_in)s\" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit \"%(f_tmp)s\"'\n )\n enc_args = opt % vars()\n if os.path.exists(f_tmp):\n os.remove(f_tmp)\n cmd = path_ffmpeg + ' ' + enc_args\n print(cmd)\n res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)\n if res and res.returncode == 0:\n print('Well done. encoding .....')\n print('rename')\n os.rename(f_tmp, f_out)\n else:\n print('failed ..... remove tmp-file')\n os.remove(f_tmp)\n return\n\n\ndef del_files():\n pass\n\n\ndef itr_ts_files():\n for path in os.listdir(dir_ts_files):\n if path.endswith('.ts'):\n yield path\n\n\ndef transcode():\n try:\n with open(path_lock, 'x') as fl:\n for path in itr_ts_files():\n print('\\nStart transcode [%s]' % path)\n _exec_transcode(path)\n os.remove(path_lock)\n print('Finish transcode [%s]' % path)\n except FileExistsError as e:\n print(e)\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_work_base(f_base):\n re_num = re.compile('(\\\\d+)\\\\-.+')\n r = re_num.search(f_base)\n res = ''\n if r:\n print(r.group(1))\n res = r.group(1)\n else:\n print('error... cant find f_base.')\n return res\n\n\ndef chk_tmp_file(f_tmp):\n pass\n\n\ndef get_file_base(path):\n fname = os.path.basename(path)\n return fname.split('.ts')[0]\n\n\ndef _exec_transcode(path):\n f_base = get_file_base(path)\n work_base = get_work_base(f_base)\n f_in = os.path.join(dir_ts_files, f_base + '.ts')\n f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')\n f_out = os.path.join(dir_output, f_base + '.m4v')\n opt = (\n '-i \"%(f_in)s\" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit \"%(f_tmp)s\"'\n )\n enc_args = opt % vars()\n if os.path.exists(f_tmp):\n os.remove(f_tmp)\n cmd = path_ffmpeg + ' ' + enc_args\n print(cmd)\n res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)\n if res and res.returncode == 0:\n print('Well done. encoding .....')\n print('rename')\n os.rename(f_tmp, f_out)\n else:\n print('failed ..... remove tmp-file')\n os.remove(f_tmp)\n return\n\n\ndef del_files():\n pass\n\n\ndef itr_ts_files():\n for path in os.listdir(dir_ts_files):\n if path.endswith('.ts'):\n yield path\n\n\ndef transcode():\n try:\n with open(path_lock, 'x') as fl:\n for path in itr_ts_files():\n print('\\nStart transcode [%s]' % path)\n _exec_transcode(path)\n os.remove(path_lock)\n print('Finish transcode [%s]' % path)\n except FileExistsError as e:\n print(e)\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_work_base(f_base):\n re_num = re.compile('(\\\\d+)\\\\-.+')\n r = re_num.search(f_base)\n res = ''\n if r:\n print(r.group(1))\n res = r.group(1)\n else:\n print('error... cant find f_base.')\n return res\n\n\ndef chk_tmp_file(f_tmp):\n pass\n\n\ndef get_file_base(path):\n fname = os.path.basename(path)\n return fname.split('.ts')[0]\n\n\ndef _exec_transcode(path):\n f_base = get_file_base(path)\n work_base = get_work_base(f_base)\n f_in = os.path.join(dir_ts_files, f_base + '.ts')\n f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')\n f_out = os.path.join(dir_output, f_base + '.m4v')\n opt = (\n '-i \"%(f_in)s\" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit \"%(f_tmp)s\"'\n )\n enc_args = opt % vars()\n if os.path.exists(f_tmp):\n os.remove(f_tmp)\n cmd = path_ffmpeg + ' ' + enc_args\n print(cmd)\n res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)\n if res and res.returncode == 0:\n print('Well done. encoding .....')\n print('rename')\n os.rename(f_tmp, f_out)\n else:\n print('failed ..... remove tmp-file')\n os.remove(f_tmp)\n return\n\n\ndef del_files():\n pass\n\n\ndef itr_ts_files():\n for path in os.listdir(dir_ts_files):\n if path.endswith('.ts'):\n yield path\n\n\ndef transcode():\n try:\n with open(path_lock, 'x') as fl:\n for path in itr_ts_files():\n print('\\nStart transcode [%s]' % path)\n _exec_transcode(path)\n os.remove(path_lock)\n print('Finish transcode [%s]' % path)\n except FileExistsError as e:\n print(e)\n return\n\n\nif __name__ == '__main__':\n transcode()\n",
"step-4": "<mask token>\npath_ffmpeg = 'C:\\\\work\\\\ffmpeg\\\\ffmpeg-3.4.2-win64-static\\\\bin\\\\ffmpeg.exe'\ndir_ts_files = 'E:\\\\ts'\ndir_output = 'E:\\\\hb'\npath_lock = 'C:\\\\work\\\\ffmpeg\\\\encode.lock'\n\n\ndef get_work_base(f_base):\n re_num = re.compile('(\\\\d+)\\\\-.+')\n r = re_num.search(f_base)\n res = ''\n if r:\n print(r.group(1))\n res = r.group(1)\n else:\n print('error... cant find f_base.')\n return res\n\n\ndef chk_tmp_file(f_tmp):\n pass\n\n\ndef get_file_base(path):\n fname = os.path.basename(path)\n return fname.split('.ts')[0]\n\n\ndef _exec_transcode(path):\n f_base = get_file_base(path)\n work_base = get_work_base(f_base)\n f_in = os.path.join(dir_ts_files, f_base + '.ts')\n f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')\n f_out = os.path.join(dir_output, f_base + '.m4v')\n opt = (\n '-i \"%(f_in)s\" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit \"%(f_tmp)s\"'\n )\n enc_args = opt % vars()\n if os.path.exists(f_tmp):\n os.remove(f_tmp)\n cmd = path_ffmpeg + ' ' + enc_args\n print(cmd)\n res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)\n if res and res.returncode == 0:\n print('Well done. encoding .....')\n print('rename')\n os.rename(f_tmp, f_out)\n else:\n print('failed ..... remove tmp-file')\n os.remove(f_tmp)\n return\n\n\ndef del_files():\n pass\n\n\ndef itr_ts_files():\n for path in os.listdir(dir_ts_files):\n if path.endswith('.ts'):\n yield path\n\n\ndef transcode():\n try:\n with open(path_lock, 'x') as fl:\n for path in itr_ts_files():\n print('\\nStart transcode [%s]' % path)\n _exec_transcode(path)\n os.remove(path_lock)\n print('Finish transcode [%s]' % path)\n except FileExistsError as e:\n print(e)\n return\n\n\nif __name__ == '__main__':\n transcode()\n",
"step-5": "\n\nimport os\nimport subprocess\nimport re\n# import fcntl\n\n# path_ffmpeg = \npath_ffmpeg = r'C:\\work\\ffmpeg\\ffmpeg-3.4.2-win64-static\\bin\\ffmpeg.exe'\ndir_ts_files = r'E:\\ts'\ndir_output = r'E:\\hb'\npath_lock = r'C:\\work\\ffmpeg\\encode.lock'\n\ndef get_work_base(f_base):\n re_num = re.compile(r'(\\d+)\\-.+')\n r = re_num.search(f_base)\n res = ''\n if r:\n print(r.group(1))\n res = r.group(1)\n else:\n print('error... cant find f_base.')\n return res\n\ndef chk_tmp_file(f_tmp):\n pass\n\ndef get_file_base(path):\n fname = os.path.basename(path)\n return fname.split('.ts')[0]\n\ndef _exec_transcode(path):\n f_base = get_file_base(path)\n work_base = get_work_base(f_base)\n\n f_in = os.path.join(dir_ts_files, f_base + '.ts')\n f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')\n f_out = os.path.join(dir_output, f_base + '.m4v')\n \n #opt = '-i %(f_in)s -vf scale=720:-1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit %(f_tmp)s'\n #opt = '-i %(f_in)s -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -g 1 -c:a aac -b:a 96k -filter_complex channelsplit %(f_tmp)s'\n opt = '-i \"%(f_in)s\" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit \"%(f_tmp)s\"'\n #opt = '-i %(f_in)s -vf scale=840:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 26 -c:a aac -b:a 96k -filter_complex channelsplit %(f_tmp)s'\n enc_args = opt % vars()\n\n if os.path.exists(f_tmp):\n os.remove(f_tmp)\n\n # cmd = ' '.join([ffmpeg, f_in, f_out])\n cmd = path_ffmpeg + ' ' + enc_args\n print(cmd)\n # res = subprocess.run([ffmpeg, enc_args], stdout=subprocess.PIPE)\n res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)\n #return res\n if res and res.returncode == 0:\n print('Well done. encoding .....')\n print('rename')\n os.rename(f_tmp, f_out)\n else:\n print('failed ..... 
remove tmp-file')\n os.remove(f_tmp)\n return\n\ndef del_files():\n pass\n\ndef itr_ts_files():\n # [print(path) for path in os.listdir(dir_ts) if path.endswith('.ts')]\n for path in os.listdir(dir_ts_files):\n if path.endswith('.ts'):\n yield path\n\ndef transcode():\n # 複数プロセス起動を防ぐためファイルロックを利用。x-modeでopen (for Windows)\n #os.remove(path_lock)\n #return\n try:\n with open(path_lock, 'x') as fl:\n for path in itr_ts_files():\n print('\\nStart transcode [%s]' % path)\n _exec_transcode(path)\n os.remove(path_lock)\n print('Finish transcode [%s]' % path)\n except FileExistsError as e:\n # print('ロックを獲得できませんでした... エンコード中')\n print(e)\n return\n\n\n# TODO\n# tmp-file をロック。できなければ終了。\n# f_out があれば、終了。\n# f_in を削除 (正常終了の場合)\n# ts ファイルのステータスチェック(録画済か録画中か?)\n# ts ファイルをみつけて、エンコード開始\n\n\nif __name__ == '__main__':\n transcode()\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
#!/usr/bin/env python
#coding:utf-8
'''
Created on 2016年8月29日
@author: lichen
'''
def custom_proc(request):
    """Custom context processor: expose a fixed test value to all templates."""
    extra_context = {"context_test": "test"}
    return extra_context
|
normal
|
{
"blob_id": "43ecb173e3d306284f2122410b5b74945572f683",
"index": 8104,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef custom_proc(request):\n \"\"\"\n 自定义context_processors\n \"\"\"\n return {'context_test': 'test'}\n",
"step-3": "#!/usr/bin/env python\n#coding:utf-8\n\n'''\nCreated on 2016年8月29日\n\n@author: lichen\n'''\n\ndef custom_proc(request):\n \"\"\"\n 自定义context_processors\n \"\"\"\n return {\n \"context_test\":\"test\"\n }",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def main():
piso = largura * comprimento
volume_sala = largura * comprimento * altura
area = 2 * altura * largura + 2 * altura * comprimento
print(piso)
print(volume_sala)
print(area)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def main():
piso = largura * comprimento
volume_sala = largura * comprimento * altura
area = 2 * altura * largura + 2 * altura * comprimento
print(piso)
print(volume_sala)
print(area)
<|reserved_special_token_0|>
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
def main():
piso = largura * comprimento
volume_sala = largura * comprimento * altura
area = 2 * altura * largura + 2 * altura * comprimento
print(piso)
print(volume_sala)
print(area)
altura = float(input(''))
largura = float(input(''))
comprimento = float(input(''))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
def main():
    """Print floor area, volume and wall area for a rectangular room.

    Reads the module-level globals ``altura``, ``largura`` and
    ``comprimento`` (set from stdin at import time).
    """
    floor_area = largura * comprimento
    room_volume = largura * comprimento * altura
    wall_area = 2 * altura * largura + 2 * altura * comprimento
    print(floor_area)
    print(room_volume)
    print(wall_area)
# Room dimensions read from stdin, one value per line (empty prompt).
altura = float(input(""))
largura = float(input(""))
comprimento = float(input(""))
# Entry point when run as a script.
if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "d78fd8ebf9ef55700a25a9ce96d9094f1bfa564e",
"index": 6455,
"step-1": "<mask token>\n",
"step-2": "def main():\n piso = largura * comprimento\n volume_sala = largura * comprimento * altura\n area = 2 * altura * largura + 2 * altura * comprimento\n print(piso)\n print(volume_sala)\n print(area)\n\n\n<mask token>\n",
"step-3": "def main():\n piso = largura * comprimento\n volume_sala = largura * comprimento * altura\n area = 2 * altura * largura + 2 * altura * comprimento\n print(piso)\n print(volume_sala)\n print(area)\n\n\n<mask token>\nif __name__ == '__main__':\n main()\n",
"step-4": "def main():\n piso = largura * comprimento\n volume_sala = largura * comprimento * altura\n area = 2 * altura * largura + 2 * altura * comprimento\n print(piso)\n print(volume_sala)\n print(area)\n\n\naltura = float(input(''))\nlargura = float(input(''))\ncomprimento = float(input(''))\nif __name__ == '__main__':\n main()\n",
"step-5": "def main():\n piso = largura * comprimento\n volume_sala = largura * comprimento * altura\n area = 2 * altura * largura + 2 * altura * comprimento\n print(piso)\n print(volume_sala)\n print(area)\n\naltura = float(input(\"\"))\nlargura = float(input(\"\"))\ncomprimento = float(input(\"\"))\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LSTMCallback(Callback):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LSTMCallback(Callback):
def on_test_end(self, trainer, pl_module):
f = open('/evaluation.log', 'w')
for ev in pl_module.evaluation_data:
f.write(ev + '\n')
Validator(pl_module.evaluation_data, ['speed'])
<|reserved_special_token_1|>
from pytorch_lightning.callbacks import Callback
from evaluation.validator import Validator
class LSTMCallback(Callback):
    """Lightning callback that dumps evaluation data and runs the Validator.

    Bug fixed: the original opened '/evaluation.log' without ever closing
    it, leaking the handle and risking unflushed output; a context manager
    now guarantees the file is flushed and closed even if a write fails.
    """

    def on_test_end(self, trainer, pl_module):
        """Write each collected evaluation line to the log, then validate.

        :param trainer: the Lightning trainer (unused here)
        :param pl_module: module exposing ``evaluation_data`` (iterable of str)
        """
        with open('/evaluation.log', 'w') as f:
            for ev in pl_module.evaluation_data:
                f.write(ev + '\n')
        # Validate the collected data; 'speed' selects the metric set.
        Validator(pl_module.evaluation_data, ['speed'])
|
flexible
|
{
"blob_id": "42743ee2a812d8fe6fc036ba97daaff5be35564d",
"index": 4618,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LSTMCallback(Callback):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass LSTMCallback(Callback):\n\n def on_test_end(self, trainer, pl_module):\n f = open('/evaluation.log', 'w')\n for ev in pl_module.evaluation_data:\n f.write(ev + '\\n')\n Validator(pl_module.evaluation_data, ['speed'])\n",
"step-4": "from pytorch_lightning.callbacks import Callback\nfrom evaluation.validator import Validator\n\n\nclass LSTMCallback(Callback):\n\n def on_test_end(self, trainer, pl_module):\n f = open('/evaluation.log', 'w')\n for ev in pl_module.evaluation_data:\n f.write(ev + '\\n')\n Validator(pl_module.evaluation_data, ['speed'])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import hashlib
import json
import logging
import os
import urllib.parse
import uuid
from datetime import datetime
import pytz
from celery import states as celery_states
from django.conf import settings
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.contrib.sessions.models import Session
from django.core.cache import cache
from django.core.exceptions import MultipleObjectsReturned
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import PermissionDenied
from django.core.exceptions import ValidationError
from django.core.files.storage import default_storage
from django.core.files.storage import FileSystemStorage
from django.core.mail import send_mail
from django.core.validators import MaxValueValidator
from django.core.validators import MinValueValidator
from django.db import IntegrityError
from django.db import models
from django.db.models import Count
from django.db.models import Exists
from django.db.models import F
from django.db.models import Index
from django.db.models import JSONField
from django.db.models import Max
from django.db.models import OuterRef
from django.db.models import Q
from django.db.models import Subquery
from django.db.models import Sum
from django.db.models import UUIDField as DjangoUUIDField
from django.db.models import Value
from django.db.models.expressions import ExpressionList
from django.db.models.expressions import RawSQL
from django.db.models.functions import Lower
from django.db.models.indexes import IndexExpression
from django.db.models.query_utils import DeferredAttribute
from django.db.models.sql import Query
from django.dispatch import receiver
from django.utils import timezone
from django.utils.translation import gettext as _
from django_celery_results.models import TaskResult
from django_cte import With
from le_utils import proquint
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import file_formats
from le_utils.constants import format_presets
from le_utils.constants import languages
from le_utils.constants import roles
from model_utils import FieldTracker
from mptt.models import MPTTModel
from mptt.models import raise_if_unsaved
from mptt.models import TreeForeignKey
from postmark.core import PMMailInactiveRecipientException
from postmark.core import PMMailUnauthorizedException
from rest_framework.authtoken.models import Token
from rest_framework.fields import get_attribute
from rest_framework.utils.encoders import JSONEncoder
from contentcuration.constants import channel_history
from contentcuration.constants import completion_criteria
from contentcuration.constants import user_history
from contentcuration.constants.contentnode import kind_activity_map
from contentcuration.db.models.expressions import Array
from contentcuration.db.models.functions import ArrayRemove
from contentcuration.db.models.functions import Unnest
from contentcuration.db.models.manager import CustomContentNodeTreeManager
from contentcuration.db.models.manager import CustomManager
from contentcuration.statistics import record_channel_stats
from contentcuration.utils.cache import delete_public_channel_cache_keys
from contentcuration.utils.parser import load_json_string
from contentcuration.viewsets.sync.constants import ALL_CHANGES
from contentcuration.viewsets.sync.constants import ALL_TABLES
# Channel permission levels.
EDIT_ACCESS = "edit"
VIEW_ACCESS = "view"
# Content metadata defaults applied when a user has not customized their own.
DEFAULT_CONTENT_DEFAULTS = {
    'license': None,
    'language': None,
    'author': None,
    'aggregator': None,
    'provider': None,
    'copyright_holder': None,
    'license_description': None,
    'mastery_model': exercises.NUM_CORRECT_IN_A_ROW_5,
    'm_value': 5,
    'n_value': 5,
    'auto_derive_video_thumbnail': True,
    'auto_derive_audio_thumbnail': True,
    'auto_derive_document_thumbnail': True,
    'auto_derive_html5_thumbnail': True,
    'auto_derive_exercise_thumbnail': True,
    'auto_randomize_questions': True,
}
# JSON-serialized defaults; used as the default for User.preferences.
DEFAULT_USER_PREFERENCES = json.dumps(DEFAULT_CONTENT_DEFAULTS, ensure_ascii=False)
def to_pk(model_or_pk):
    """Return the primary key, accepting either a model instance or a raw pk."""
    if not isinstance(model_or_pk, models.Model):
        return model_or_pk
    return model_or_pk.pk
class UserManager(BaseUserManager):
    """Manager that creates user accounts keyed by a normalized email."""

    def create_user(self, email, first_name, last_name, password=None):
        """Create and persist a regular (non-admin) user.

        :raises ValueError: if ``email`` is empty.
        """
        if not email:
            raise ValueError('Email address not specified')
        user = self.model(email=self.normalize_email(email))
        user.set_password(password)
        user.first_name = first_name
        user.last_name = last_name
        user.save(using=self._db)
        return user

    def create_superuser(self, email, first_name, last_name, password=None):
        """Create a regular user, then promote it to admin."""
        user = self.create_user(email, first_name, last_name, password=password)
        user.is_admin = True
        user.save(using=self._db)
        return user
class UniqueActiveUserIndex(Index):
    """Index subclass whose generated SQL is a *unique* index.

    Used below on ``User.Meta.indexes`` to enforce uniqueness of the
    lowercased email among active users via a partial unique index.
    """

    def create_sql(self, model, schema_editor, using='', **kwargs):
        """
        This is a vendored and modified version of the Django create_sql method
        We do this so that we can monkey patch in the unique index statement onto the schema_editor
        while we create the statement for this index, and then revert it to normal.
        We should remove this as soon as Django natively supports UniqueConstraints with Expressions.
        This should hopefully be the case in Django 3.3.
        """
        include = [model._meta.get_field(field_name).column for field_name in self.include]
        condition = self._get_condition_sql(model, schema_editor)
        if self.expressions:
            # Expression-based index: wrap each expression so it resolves
            # against a column-less Query for this model.
            index_expressions = []
            for expression in self.expressions:
                index_expression = IndexExpression(expression)
                index_expression.set_wrapper_classes(schema_editor.connection)
                index_expressions.append(index_expression)
            expressions = ExpressionList(*index_expressions).resolve_expression(
                Query(model, alias_cols=False),
            )
            fields = None
            col_suffixes = None
        else:
            # Plain field-based index path.
            fields = [
                model._meta.get_field(field_name)
                for field_name, _ in self.fields_orders
            ]
            col_suffixes = [order[1] for order in self.fields_orders]
            expressions = None
        sql = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)%(include)s%(condition)s"
        # Store the normal SQL statement for indexes
        old_create_index_sql = schema_editor.sql_create_index
        # Replace it with our own unique index so that this index actually adds a constraint
        schema_editor.sql_create_index = sql
        # Generate the SQL statement that we want to return
        return_statement = schema_editor._create_index_sql(
            model, fields=fields, name=self.name, using=using,
            db_tablespace=self.db_tablespace, col_suffixes=col_suffixes,
            opclasses=self.opclasses, condition=condition, include=include,
            expressions=expressions, **kwargs,
        )
        # Reinstate the previous index SQL statement so that we have done no harm
        schema_editor.sql_create_index = old_create_index_sql
        # Return our SQL statement
        return return_statement
class User(AbstractBaseUser, PermissionsMixin):
    """Studio user account: auth identity plus storage quota bookkeeping.

    Authentication is by email (``USERNAME_FIELD``); ``delete`` is a soft
    delete, with ``hard_delete_user_related_data`` for permanent cleanup.
    """
    email = models.EmailField(max_length=100, unique=True)
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    is_admin = models.BooleanField(default=False)
    is_active = models.BooleanField('active', default=False,
                                    help_text='Designates whether this user should be treated as active.')
    is_staff = models.BooleanField('staff status', default=False,
                                   help_text='Designates whether the user can log into this admin site.')
    date_joined = models.DateTimeField('date joined', default=timezone.now)
    # Root node of this user's clipboard tree; created lazily in save().
    clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='user_clipboard', on_delete=models.SET_NULL)
    preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)
    disk_space = models.FloatField(default=524288000, help_text='How many bytes a user can upload')
    disk_space_used = models.FloatField(default=0, help_text='How many bytes a user has uploaded')
    information = JSONField(null=True)
    content_defaults = JSONField(default=dict)
    policies = JSONField(default=dict, null=True)
    feature_flags = JSONField(default=dict, null=True)
    # Soft-deletion flag; see delete()/recover().
    deleted = models.BooleanField(default=False, db_index=True)
    _field_updates = FieldTracker(fields=[
        # Field to watch for changes
        "disk_space",
    ])
    objects = UserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name']

    def __unicode__(self):
        # Python 2 legacy string representation.
        return self.email

    def delete(self):
        """
        Soft deletes the user account.
        """
        self.deleted = True
        # Deactivate the user to disallow authentication and also
        # to let the user verify the email again after recovery.
        self.is_active = False
        self.save()
        self.history.create(user_id=self.pk, action=user_history.DELETION)

    def recover(self):
        """
        Use this method when we want to recover a user.
        """
        self.deleted = False
        self.save()
        self.history.create(user_id=self.pk, action=user_history.RECOVERY)

    def hard_delete_user_related_data(self):
        """
        Hard delete all user related data. But keeps the user record itself intact.

        User related data that gets hard deleted are:
            - sole editor non-public channels.
            - sole editor non-public channelsets.
            - sole editor non-public channels' content nodes and its underlying files that are not
            used by any other channel.
            - all user invitations.
        """
        from contentcuration.viewsets.common import SQCount
        # Hard delete invitations associated to this account.
        self.sent_to.all().delete()
        self.sent_by.all().delete()
        editable_channels_user_query = (
            User.objects.filter(editable_channels__id=OuterRef('id'))
            .values_list('id', flat=True)
            .distinct()
        )
        non_public_channels_sole_editor = self.editable_channels.annotate(num_editors=SQCount(
            editable_channels_user_query, field="id")).filter(num_editors=1, public=False)
        # Point sole editor non-public channels' contentnodes to orphan tree to let
        # our garbage collection delete the nodes and underlying files.
        ContentNode._annotate_channel_id(ContentNode.objects).filter(channel_id__in=list(
            non_public_channels_sole_editor.values_list("id", flat=True))).update(parent_id=settings.ORPHANAGE_ROOT_ID)
        # Hard delete non-public channels associated with this user (if user is the only editor).
        non_public_channels_sole_editor.delete()
        # Hard delete non-public channel collections associated with this user (if user is the only editor).
        user_query = (
            User.objects.filter(channel_sets__id=OuterRef('id'))
            .values_list('id', flat=True)
            .distinct()
        )
        self.channel_sets.annotate(num_editors=SQCount(user_query, field="id")).filter(num_editors=1, public=False).delete()
        # Create history!
        self.history.create(user_id=self.pk, action=user_history.RELATED_DATA_HARD_DELETION)

    def can_edit(self, channel_id):
        """Return True if this user has edit rights on the given channel."""
        return Channel.filter_edit_queryset(Channel.objects.all(), self).filter(pk=channel_id).exists()

    def check_space(self, size, checksum):
        """Verify ``size`` bytes fit in the quota; admins and duplicate
        checksums pass immediately (returns True); raises PermissionDenied
        when over quota, otherwise returns None."""
        if self.is_admin:
            return True
        active_files = self.get_user_active_files()
        if active_files.filter(checksum=checksum).exists():
            return True
        space = self.get_available_space(active_files=active_files)
        if space < size:
            raise PermissionDenied(_("Not enough space. Check your storage under Settings page."))

    def check_channel_space(self, channel):
        """Raise PermissionDenied if the channel's staging tree (counting only
        checksums not already in the user's active files) exceeds free space."""
        active_files = self.get_user_active_files()
        staging_tree_id = channel.staging_tree.tree_id
        channel_files = self.files\
            .filter(contentnode__tree_id=staging_tree_id)\
            .values('checksum')\
            .distinct()\
            .exclude(checksum__in=active_files.values_list('checksum', flat=True))
        staged_size = float(channel_files.aggregate(used=Sum('file_size'))['used'] or 0)
        if self.get_available_space(active_files=active_files) < (staged_size):
            raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.'))

    def check_staged_space(self, size, checksum):
        """Like check_space, but against the staged-files quota."""
        if self.staged_files.filter(checksum=checksum).exists():
            return True
        space = self.get_available_staged_space()
        if space < size:
            raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.'))

    def get_available_staged_space(self):
        """Remaining quota (bytes, >= 0) counting distinct staged checksums."""
        space_used = self.staged_files.values('checksum').distinct().aggregate(size=Sum("file_size"))['size'] or 0
        return float(max(self.disk_space - space_used, 0))

    def get_available_space(self, active_files=None):
        """Remaining quota (bytes, >= 0) against active-file usage."""
        return float(max(self.disk_space - self.get_space_used(active_files=active_files), 0))

    def get_user_active_trees(self):
        """Tree ids of the main trees of this user's non-deleted channels."""
        return self.editable_channels.exclude(deleted=True)\
            .values(tree_id=F("main_tree__tree_id"))

    def get_user_active_files(self):
        """Distinct checksums of the user's files in active channel trees (CTE join)."""
        cte = With(self.get_user_active_trees().distinct())
        return cte.join(self.files.get_queryset(), contentnode__tree_id=cte.col.tree_id)\
            .with_cte(cte)\
            .values('checksum')\
            .distinct()

    def get_space_used(self, active_files=None):
        """Total bytes used by active files (0.0 when there are none)."""
        active_files = active_files or self.get_user_active_files()
        files = active_files.aggregate(total_used=Sum('file_size'))
        return float(files['total_used'] or 0)

    def set_space_used(self):
        """Recompute and persist disk_space_used; returns the new value."""
        self.disk_space_used = self.get_space_used()
        self.save()
        return self.disk_space_used

    def get_space_used_by_kind(self):
        """Map of preset kind id -> bytes used by this user's active files."""
        active_files = self.get_user_active_files()
        files = active_files.values('preset__kind_id')\
            .annotate(space=Sum('file_size'))\
            .order_by()
        kind_dict = {}
        for item in files:
            kind_dict[item['preset__kind_id']] = item['space']
        return kind_dict

    def email_user(self, subject, message, from_email=None, **kwargs):
        """Send an email to this user; Postmark recipient/auth errors are
        logged rather than propagated."""
        try:
            # msg = EmailMultiAlternatives(subject, message, from_email, [self.email])
            # msg.attach_alternative(kwargs["html_message"],"text/html")
            # msg.send()
            send_mail(subject, message, from_email, [self.email], **kwargs)
        except (PMMailInactiveRecipientException, PMMailUnauthorizedException) as e:
            logging.error(str(e))

    def clean(self):
        # Normalize the email domain casing on validation.
        super(User, self).clean()
        self.email = self.__class__.objects.normalize_email(self.email)

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        """
        Returns the short name for the user.
        """
        return self.first_name

    def get_token(self):
        """Return (creating if needed) this user's DRF auth token key."""
        token, _ = Token.objects.get_or_create(user=self)
        return token.key

    def save(self, *args, **kwargs):
        """Persist the user; recalculates storage when disk_space changed and
        lazily backfills content_defaults and the clipboard tree (triggering
        one extra save when either was missing)."""
        from contentcuration.utils.user import calculate_user_storage
        super(User, self).save(*args, **kwargs)
        if 'disk_space' in self._field_updates.changed():
            calculate_user_storage(self.pk)
        changed = False
        if not self.content_defaults:
            self.content_defaults = DEFAULT_CONTENT_DEFAULTS
            changed = True
        if not self.clipboard_tree:
            self.clipboard_tree = ContentNode.objects.create(title=self.email + " clipboard", kind_id=content_kinds.TOPIC)
            self.clipboard_tree.save()
            changed = True
        if changed:
            self.save()

    class Meta:
        verbose_name = "User"
        verbose_name_plural = "Users"
        indexes = [
            # Partial unique index: lowercased email must be unique among active users.
            UniqueActiveUserIndex(Lower('email'), condition=Q(is_active=True), name="contentcura_email_d4d492_idx")
        ]

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict ``queryset`` to users visible to ``user``: themselves plus
        anyone sharing an editable or view-only channel; admins see all."""
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset
        # all shared editors
        all_editable = User.editable_channels.through.objects.all()
        editable = all_editable.filter(
            channel_id__in=all_editable.filter(user_id=user.pk).values_list("channel_id", flat=True)
        )
        # all shared viewers
        all_view_only = User.view_only_channels.through.objects.all()
        view_only = all_view_only.filter(
            channel_id__in=all_view_only.filter(user_id=user.pk).values_list("channel_id", flat=True)
        )
        return queryset.filter(
            Q(pk=user.pk)
            | Q(pk__in=editable.values_list("user_id", flat=True))
            | Q(pk__in=view_only.values_list("user_id", flat=True))
        )

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict ``queryset`` to users editable by ``user`` (self only,
        unless admin)."""
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset
        return queryset.filter(pk=user.pk)

    @classmethod
    def get_for_email(cls, email, deleted=False, **filters):
        """
        Returns the appropriate User record given an email, ordered by:
            - those with is_active=True first, which there should only ever be one
            - otherwise by ID DESC so most recent inactive shoud be returned

        Filters out deleted User records by default. To include both deleted and
        undeleted user records pass None to the deleted argument.

        :param email: A string of the user's email
        :param filters: Additional filters to filter the User queryset
        :return: User or None
        """
        user_qs = User.objects.filter(email__iexact=email.strip())
        if deleted is not None:
            user_qs = user_qs.filter(deleted=deleted)
        return user_qs.filter(**filters).order_by("-is_active", "-id").first()
class UUIDField(models.CharField):
    """CharField that stores a UUID as its 32-character hex string.

    Accepts either ``uuid.UUID`` instances or plain strings everywhere and
    normalizes UUIDs to their ``.hex`` representation.
    """

    def __init__(self, *args, **kwargs):
        # Hex form of a UUID is always 32 characters.
        kwargs['max_length'] = 32
        super(UUIDField, self).__init__(*args, **kwargs)

    def prepare_value(self, value):
        return value.hex if isinstance(value, uuid.UUID) else value

    def get_default(self):
        default = super(UUIDField, self).get_default()
        return default.hex if isinstance(default, uuid.UUID) else default

    def to_python(self, value):
        return value.hex if isinstance(value, uuid.UUID) else value
class MPTTTreeIDManager(models.Model):
    """
    Because MPTT uses plain integers for tree IDs and does not use an auto-incrementing field for them,
    the same ID can sometimes be assigned to two trees if two channel create ops happen concurrently.

    As we are using this table only for the ID generation, it does not need any fields.

    We resolve this by creating a dummy table and using its ID as the tree index to take advantage of the db's
    concurrency-friendly way of generating sequential integer IDs. There is a custom migration that ensures
    that the number of records (and thus id) matches the max tree ID number when this table gets added.
    """
    # Intentionally empty: only the auto-generated integer primary key is used.
def file_on_disk_name(instance, filename):
    """
    Create a namespaced file path from the File object's checksum property.
    This path will be used to store the content copy.

    :param instance: File (content File model)
    :param filename: str
    :return: str
    """
    return generate_file_on_disk_name(instance.checksum, filename)
def generate_file_on_disk_name(checksum, filename):
    """Return the sharded on-disk path for *checksum*, creating its directory.

    Separated from file_on_disk_name to allow a simple way to check whether
    the file already exists.

    :param checksum: hex digest string; its first two characters shard
        storage into ``<c0>/<c1>/`` subdirectories
    :param filename: original filename; only its extension (lowercased) is kept
    :return: path under settings.STORAGE_ROOT
    """
    h = checksum
    basename, ext = os.path.splitext(filename)
    directory = os.path.join(settings.STORAGE_ROOT, h[0], h[1])
    # exist_ok avoids the TOCTOU race the old exists()+makedirs() pair had
    # when two processes created the same shard directory concurrently.
    os.makedirs(directory, exist_ok=True)
    return os.path.join(directory, h + ext.lower())
def object_storage_name(instance, filename):
    """
    Create a namespaced object-storage path from the File object's checksum.
    This path will be used to store the content copy.

    :param instance: File (content File model)
    :param filename: str
    :return: str
    """
    # Fall back to the file-format extension when the filename has none.
    fallback_ext = '.{}'.format(instance.file_format_id) if instance.file_format_id else ''
    return generate_object_storage_name(instance.checksum, filename, fallback_ext)
def generate_object_storage_name(checksum, filename, default_ext=''):
    """Compute the sharded object-storage key for *checksum*.

    Separated from file_on_disk_name to allow a simple way to check whether
    the object already exists.
    """
    h = checksum
    _, existing_ext = os.path.splitext(filename)
    ext = existing_ext if existing_ext else default_ext
    # Use / instead of os.path.join as Windows makes this \\
    root = "/".join([settings.STORAGE_ROOT, h[0], h[1]])
    return os.path.join(root, h + ext.lower())
def generate_storage_url(filename, request=None, *args):
    """
    Generate a storage URL for the given content filename.

    :param filename: str checksum-derived filename; its stem and extension
        determine the object-storage key
    :param request: unused here; kept for call-site compatibility
    :return: str URL from which the stored object can be fetched
    """
    path = generate_object_storage_name(os.path.splitext(filename)[0], filename)

    # There are three scenarios where Studio might be run as:
    #
    # 1. In normal kubernetes, nginx will proxy for us. We'll know we're in kubernetes when the
    # environment variable RUN_MODE=k8s
    #
    # 2. In Docker Compose and bare metal runserver, we'll be running in runserver, and minio
    # will be exposed in port 9000 in the host's localhost network.

    # Note (aron): returning the true storage URL (e.g. https://storage.googleapis.com/storage/a.mp4)
    # isn't too important, because we have CDN in front of our servers, so it should be cached.
    # But change the logic here in case there is a potential for bandwidth and latency improvement.

    # Detect our current state first
    run_mode = os.getenv("RUN_MODE")

    # if we're running inside k8s, then just serve the normal /content/{storage,databases} URL,
    # and let nginx handle proper proxying.
    if run_mode == "k8s":
        url = "/content/{path}".format(
            path=path,
        )

    # if we're in docker-compose or in baremetal, just return the object storage URL as localhost:9000
    elif run_mode == "docker-compose" or run_mode is None:
        # generate the minio storage URL, so we can get the GET parameters that give everyone
        # access even if they don't need to log in
        params = urllib.parse.urlparse(default_storage.url(path)).query
        host = "localhost"
        port = 9000  # hardcoded to the default minio IP address
        url = "http://{host}:{port}/{bucket}/{path}?{params}".format(
            host=host,
            port=port,
            bucket=settings.AWS_S3_BUCKET_NAME,
            path=path,
            params=params,
        )

    # NOTE(review): any other RUN_MODE value falls through without assigning
    # `url`, so the return below raises UnboundLocalError — confirm whether
    # unknown run modes can occur here.
    return url
class FileOnDiskStorage(FileSystemStorage):
    """
    Override FileSystemStorage's default save method to ignore duplicated files.

    File names are derived from content checksums, so an existing file with
    the same name already holds identical content and need not be rewritten.
    """

    def get_available_name(self, name):
        # Always reuse the checksum-derived name instead of appending a unique
        # suffix; duplicates are deliberate no-ops (see _save).
        return name

    def _save(self, name, content):
        if self.exists(name):
            # If the file exists, do not call the superclass _save method.
            # logging.warn is a deprecated alias; use warning() with lazy args.
            logging.warning('Content copy "%s" already exists!', name)
            return name
        return super(FileOnDiskStorage, self)._save(name, content)
class SecretToken(models.Model):
    """Tokens for channels"""
    token = models.CharField(max_length=100, unique=True)
    is_primary = models.BooleanField(default=False)

    @classmethod
    def exists(cls, token):
        """
        Return true when the token string given by string already exists.
        Returns false otherwise.
        """
        return cls.objects.filter(token=token).exists()

    @classmethod
    def generate_new_token(cls):
        """
        Creates a primary secret token for the current channel using a proquint
        string. Creates a secondary token containing the channel id.

        These tokens can be used to refer to the channel to download its content
        database.
        """
        # Try up to TRIALS times to generate a token that is not already in
        # use. (The previous pre-loop proquint.generate() call was redundant:
        # the loop overwrites it immediately.)
        TRIALS = 100
        for __ in range(TRIALS):
            token = proquint.generate()
            if SecretToken.exists(token):
                continue
            break
        # after TRIALS attempts and we didn't get a unique token,
        # just raise an error.
        # See https://stackoverflow.com/a/9980160 on what for-else loop does.
        else:
            raise ValueError("Cannot generate new token")
        # We found a unique token — return it; the caller persists it.
        return token

    def __str__(self):
        # Render tokens in the human-friendly "xxxxx-xxxxx" form.
        return "{}-{}".format(self.token[:5], self.token[5:])
def get_channel_thumbnail(channel):
    """
    Return the best available thumbnail for *channel*.

    Preference order: inline base64 data from ``thumbnail_encoding``, then a
    storage URL built from the ``thumbnail`` filename (unless it is a static
    asset), then the static placeholder image.

    :param channel: Channel instance or a dict of channel attributes
    :return: str image data URI, storage URL, or placeholder path
    """
    attrs = channel if isinstance(channel, dict) else channel.__dict__

    encoding = attrs.get("thumbnail_encoding")
    if encoding:
        base64_data = encoding.get("base64")
        if base64_data:
            return base64_data

    thumbnail = attrs.get("thumbnail")
    if thumbnail and 'static' not in thumbnail:
        return generate_storage_url(thumbnail)

    return '/static/img/kolibri_placeholder.png'
# Explicit name for the DB index backing Channel.name lookups (see Channel.Meta).
CHANNEL_NAME_INDEX_NAME = "channel_name_idx"

# A list of all the FKs from Channel object
# to ContentNode trees
# used for permissions filtering
CHANNEL_TREES = (
    "main_tree",
    "chef_tree",
    "trash_tree",
    "staging_tree",
    "previous_tree",
)
def boolean_val(val):
    """Wrap *val* as a constant boolean expression usable in ORM annotations."""
    bool_field = models.BooleanField()
    return Value(val, output_field=bool_field)
class PermissionCTE(With):
    """
    Common table expression of a user's channel permissions, exploded to one
    row per (user_id, channel_id, tree_id) so that ContentNode querysets can
    be permission-filtered by tree_id membership.
    """
    # One lookup path per channel tree FK; NULL tree ids are stripped below.
    tree_id_fields = [
        "channel__{}__tree_id".format(tree_name)
        for tree_name in CHANNEL_TREES
    ]

    def __init__(self, model, user_id, **kwargs):
        # *model* is a User<->Channel M2M through table; collect the channel's
        # tree ids into an array, drop NULLs, and unnest into one row each.
        queryset = model.objects.filter(user_id=user_id)\
            .annotate(
                tree_id=Unnest(ArrayRemove(Array(*self.tree_id_fields), None), output_field=models.IntegerField())
            )
        super(PermissionCTE, self).__init__(queryset=queryset.values("user_id", "channel_id", "tree_id"), **kwargs)

    @classmethod
    def editable_channels(cls, user_id):
        """CTE of the channels (and their trees) that *user_id* can edit."""
        return PermissionCTE(User.editable_channels.through, user_id, name="editable_channels_cte")

    @classmethod
    def view_only_channels(cls, user_id):
        """CTE of the channels (and their trees) that *user_id* can view."""
        return PermissionCTE(User.view_only_channels.through, user_id, name="view_only_channels_cte")

    def exists(self, *filters):
        """Return an ``Exists()`` subquery over this CTE restricted by *filters*."""
        return Exists(self.queryset().filter(*filters).values("user_id"))
class Channel(models.Model):
    """ Permissions come from association with organizations """
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    name = models.CharField(max_length=200, blank=True)
    description = models.CharField(max_length=400, blank=True)
    tagline = models.CharField(max_length=150, blank=True, null=True)
    version = models.IntegerField(default=0)
    thumbnail = models.TextField(blank=True, null=True)
    thumbnail_encoding = JSONField(default=dict)
    editors = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='editable_channels',
        verbose_name="editors",
        help_text="Users with edit rights",
        blank=True,
    )
    viewers = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='view_only_channels',
        verbose_name="viewers",
        help_text="Users with view only rights",
        blank=True,
    )
    language = models.ForeignKey('Language', null=True, blank=True, related_name='channel_language', on_delete=models.SET_NULL)
    trash_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_trash', on_delete=models.SET_NULL)
    clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_clipboard', on_delete=models.SET_NULL)
    main_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_main', on_delete=models.SET_NULL)
    staging_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_staging', on_delete=models.SET_NULL)
    chef_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_chef', on_delete=models.SET_NULL)
    previous_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_previous', on_delete=models.SET_NULL)
    bookmarked_by = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='bookmarked_channels',
        verbose_name="bookmarked by",
    )
    deleted = models.BooleanField(default=False, db_index=True)
    public = models.BooleanField(default=False, db_index=True)
    preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)
    content_defaults = JSONField(default=dict)
    priority = models.IntegerField(default=0, help_text="Order to display public channels")
    last_published = models.DateTimeField(blank=True, null=True)
    secret_tokens = models.ManyToManyField(
        SecretToken,
        related_name='channels',
        verbose_name="secret tokens",
        blank=True,
    )
    source_url = models.CharField(max_length=200, blank=True, null=True)
    demo_server_url = models.CharField(max_length=200, blank=True, null=True)

    # Fields specific to content generated by Ricecooker
    source_id = models.CharField(max_length=200, blank=True, null=True)
    source_domain = models.CharField(max_length=300, blank=True, null=True)
    ricecooker_version = models.CharField(max_length=100, blank=True, null=True)

    # Fields to calculate when channel is published
    published_data = JSONField(default=dict)
    icon_encoding = models.TextField(blank=True, null=True)
    total_resource_count = models.IntegerField(default=0)
    published_kind_count = models.TextField(blank=True, null=True)
    published_size = models.FloatField(default=0)
    included_languages = models.ManyToManyField(
        "Language",
        related_name='channels',
        verbose_name="languages",
        blank=True,
    )

    _field_updates = FieldTracker(fields=[
        # Field to watch for changes
        "description",
        "language_id",
        "thumbnail",
        "name",
        "thumbnail_encoding",
        # watch these fields for changes
        # but exclude them from setting changed
        # on the main tree
        "deleted",
        "public",
        "main_tree_id",
        "version",
    ])

    @classmethod
    def get_editable(cls, user, channel_id):
        """Return the channel with *channel_id* if *user* may edit it; raises DoesNotExist otherwise."""
        return cls.filter_edit_queryset(cls.objects.all(), user).get(id=channel_id)

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict *queryset* to channels *user* can edit; admins see all (still annotated with `edit`)."""
        user_id = not user.is_anonymous and user.id
        # Anonymous users can never edit anything, so short-circuit.
        if not user_id:
            return queryset.none()
        edit = Exists(User.editable_channels.through.objects.filter(user_id=user_id, channel_id=OuterRef("id")))
        queryset = queryset.annotate(edit=edit)
        if user.is_admin:
            return queryset
        return queryset.filter(edit=True)

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict *queryset* to channels *user* may view: public channels for
        everyone; plus edited/viewed channels and channels with an unanswered
        invitation for signed-in users; admins see everything."""
        user_id = not user.is_anonymous and user.id
        user_email = not user.is_anonymous and user.email
        if user_id:
            filters = dict(user_id=user_id, channel_id=OuterRef("id"))
            edit = Exists(User.editable_channels.through.objects.filter(**filters).values("user_id"))
            view = Exists(User.view_only_channels.through.objects.filter(**filters).values("user_id"))
        else:
            edit = boolean_val(False)
            view = boolean_val(False)
        queryset = queryset.annotate(
            edit=edit,
            view=view,
        )
        if user_id and user.is_admin:
            return queryset
        permission_filter = Q()
        if user_id:
            # Channels the user has been invited to but has not yet accepted,
            # declined, or had revoked are also visible (unless deleted).
            pending_channels = Invitation.objects.filter(email=user_email, revoked=False, declined=False, accepted=False).values_list(
                "channel_id", flat=True
            )
            permission_filter = (
                Q(view=True) | Q(edit=True) | Q(deleted=False, id__in=pending_channels)
            )
        return queryset.filter(permission_filter | Q(deleted=False, public=True))

    @classmethod
    def get_all_channels(cls):
        """Return every channel with main_tree selected and editors/viewers prefetched."""
        return cls.objects.select_related('main_tree').prefetch_related('editors', 'viewers').distinct()

    def resource_size_key(self):
        """Cache key under which this channel's total resource size is stored."""
        return "{}_resource_size".format(self.pk)

    # Might be good to display resource size, but need to improve query time first
    def get_resource_size(self):
        """Return (and cache without timeout) the deduplicated total byte size of files in main_tree."""
        cached_data = cache.get(self.resource_size_key())
        if cached_data:
            return cached_data
        tree_id = self.main_tree.tree_id
        # Distinct on (checksum, file_size) so shared files are counted once.
        files = File.objects.select_related('contentnode', 'assessment_item')\
            .filter(contentnode__tree_id=tree_id)\
            .values('checksum', 'file_size')\
            .distinct()\
            .aggregate(resource_size=Sum('file_size'))
        cache.set(self.resource_size_key(), files['resource_size'] or 0, None)
        return files['resource_size'] or 0

    def on_create(self):
        """Initialize defaults and the main/trash trees for a new channel (called from save())."""
        record_channel_stats(self, None)
        if not self.content_defaults:
            self.content_defaults = DEFAULT_CONTENT_DEFAULTS

        if not self.main_tree:
            self.main_tree = ContentNode.objects.create(
                title=self.name,
                kind_id=content_kinds.TOPIC,
                content_id=self.id,
                node_id=self.id,
                original_channel_id=self.id,
                source_channel_id=self.id,
                changed=True,
                complete=True,
            )
            # Ensure that locust or unit tests raise if there are any concurrency issues with tree ids.
            if settings.DEBUG:
                if ContentNode.objects.filter(parent=None, tree_id=self.main_tree.tree_id).count() != 1:
                    raise AssertionError

        if not self.trash_tree:
            self.trash_tree = ContentNode.objects.create(
                title=self.name,
                kind_id=content_kinds.TOPIC,
                content_id=self.id,
                node_id=self.id,
            )

        # if this change affects the published channel list, clear the channel cache
        if self.public and (self.main_tree and self.main_tree.published):
            delete_public_channel_cache_keys()

    def on_update(self):
        """Propagate metadata edits to main_tree and run deletion/publication side effects (called from save())."""
        from contentcuration.utils.user import calculate_user_storage
        original_values = self._field_updates.changed()
        record_channel_stats(self, original_values)

        # Changes to these fields alone should NOT mark main_tree as changed.
        blacklist = set([
            "public",
            "main_tree_id",
            "version",
        ])

        if self.main_tree and original_values and any((True for field in original_values if field not in blacklist)):
            # Changing channel metadata should also mark main_tree as changed
            self.main_tree.changed = True

        # Check if original thumbnail is no longer referenced
        if "thumbnail" in original_values and original_values["thumbnail"] and 'static' not in original_values["thumbnail"]:
            filename, ext = os.path.splitext(original_values["thumbnail"])
            delete_empty_file_reference(filename, ext[1:])

        # Refresh storage for all editors on the channel
        if "deleted" in original_values:
            for editor in self.editors.all():
                calculate_user_storage(editor.pk)

        # Delete db if channel has been deleted and mark as unpublished
        # (original_values holds the PREVIOUS value, so this runs when the
        # deleted flag flips from False to True).
        if "deleted" in original_values and not original_values["deleted"]:
            self.pending_editors.all().delete()
            export_db_storage_path = os.path.join(settings.DB_ROOT, "{channel_id}.sqlite3".format(channel_id=self.id))
            if default_storage.exists(export_db_storage_path):
                default_storage.delete(export_db_storage_path)
            if self.main_tree:
                self.main_tree.published = False

        if self.main_tree and self.main_tree._field_updates.changed():
            self.main_tree.save()

        # if this change affects the published channel list, clear the channel cache
        if "public" in original_values and (self.main_tree and self.main_tree.published):
            delete_public_channel_cache_keys()

    def save(self, *args, **kwargs):
        """Run create/update side effects, then persist the channel."""
        if self._state.adding:
            self.on_create()
        else:
            self.on_update()
        super(Channel, self).save(*args, **kwargs)

    def get_thumbnail(self):
        """Return this channel's thumbnail (base64 data, storage URL, or placeholder)."""
        return get_channel_thumbnail(self)

    def has_changes(self):
        """True when any node in main_tree (including the root) is marked changed."""
        return self.main_tree.get_descendants(include_self=True).filter(changed=True).exists()

    def get_date_modified(self):
        """Return the most recent modification time across all nodes in main_tree."""
        return self.main_tree.get_descendants(include_self=True).aggregate(last_modified=Max('modified'))['last_modified']

    def get_resource_count(self):
        """Count distinct (by content_id) non-topic resources in main_tree."""
        return self.main_tree.get_descendants().exclude(kind_id=content_kinds.TOPIC).order_by('content_id').distinct('content_id').count()

    def get_human_token(self):
        """Return the channel's human-readable (primary) secret token; raises if absent."""
        return self.secret_tokens.get(is_primary=True)

    def get_channel_id_token(self):
        """Return the secret token whose value equals the channel id; raises if absent."""
        return self.secret_tokens.get(token=self.id)

    def make_token(self):
        """Create and return the primary human token, ensuring a channel-id token also exists."""
        token = self.secret_tokens.create(token=SecretToken.generate_new_token(), is_primary=True)
        self.secret_tokens.get_or_create(token=self.id)
        return token

    def make_public(self, bypass_signals=False):
        """
        Sets the current channel object to be public and viewable by anyone.

        If bypass_signals is True, update the model in such a way that we
        prevent any model signals from running due to the update.

        Returns the same channel object.
        """
        if bypass_signals:
            self.public = True  # set this attribute still, so the object will be updated
            Channel.objects.filter(id=self.id).update(public=True)
            # clear the channel cache
            delete_public_channel_cache_keys()
        else:
            self.public = True
            self.save()

        return self

    def mark_created(self, user):
        """Record a creation event in this channel's history."""
        self.history.create(actor_id=to_pk(user), action=channel_history.CREATION)

    def mark_publishing(self, user):
        """Record a publication event and flag main_tree as publishing."""
        self.history.create(actor_id=to_pk(user), action=channel_history.PUBLICATION)
        self.main_tree.publishing = True
        self.main_tree.save()

    def mark_deleted(self, user):
        """Soft-delete the channel and record the event in its history."""
        self.history.create(actor_id=to_pk(user), action=channel_history.DELETION)
        self.deleted = True
        self.save()

    def mark_recovered(self, user):
        """Undo a soft delete and record the event in its history."""
        self.history.create(actor_id=to_pk(user), action=channel_history.RECOVERY)
        self.deleted = False
        self.save()

    @property
    def deletion_history(self):
        """History records of this channel's deletions."""
        return self.history.filter(action=channel_history.DELETION)

    @property
    def publishing_history(self):
        """History records of this channel's publications."""
        return self.history.filter(action=channel_history.PUBLICATION)

    @classmethod
    def get_public_channels(cls, defer_nonmain_trees=False):
        """
        Get all public channels.

        If defer_nonmain_trees is True, defer the loading of all
        trees except for the main_tree."""
        if defer_nonmain_trees:
            c = (Channel.objects
                 .filter(public=True)
                 .exclude(deleted=True)
                 .select_related('main_tree')
                 .prefetch_related('editors')
                 .defer('trash_tree', 'clipboard_tree', 'staging_tree', 'chef_tree', 'previous_tree', 'viewers'))
        else:
            c = Channel.objects.filter(public=True).exclude(deleted=True)

        return c

    class Meta:
        verbose_name = "Channel"
        verbose_name_plural = "Channels"

        indexes = [
            models.Index(fields=["name"], name=CHANNEL_NAME_INDEX_NAME),
        ]
        index_together = [
            ["deleted", "public"]
        ]
# Explicit name for the DB index backing ChannelHistory.channel lookups.
CHANNEL_HISTORY_CHANNEL_INDEX_NAME = "idx_channel_history_channel_id"


class ChannelHistory(models.Model):
    """
    Model for tracking certain actions performed on a channel
    """
    channel = models.ForeignKey('Channel', null=False, blank=False, related_name='history', on_delete=models.CASCADE)
    actor = models.ForeignKey('User', null=False, blank=False, related_name='channel_history', on_delete=models.CASCADE)
    performed = models.DateTimeField(default=timezone.now)
    action = models.CharField(max_length=50, choices=channel_history.choices)

    @classmethod
    def prune(cls):
        """
        Prunes history records by keeping the most recent actions for each channel and type,
        and deleting all other older actions
        """
        # DISTINCT ON (channel_id, action) with this ordering keeps exactly the
        # newest record per (channel, action) pair; everything else is deleted.
        keep_ids = cls.objects.distinct("channel_id", "action").order_by("channel_id", "action", "-performed").values_list("id", flat=True)
        cls.objects.exclude(id__in=keep_ids).delete()

    class Meta:
        verbose_name = "Channel history"
        verbose_name_plural = "Channel histories"
        indexes = [
            models.Index(fields=["channel_id"], name=CHANNEL_HISTORY_CHANNEL_INDEX_NAME),
        ]
class UserHistory(models.Model):
    """
    Model that stores the user's action history.
    """
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=False, related_name="history", on_delete=models.CASCADE)
    # The kind of action performed, drawn from the user_history choices.
    action = models.CharField(max_length=32, choices=user_history.choices)
    performed_at = models.DateTimeField(default=timezone.now)
class ChannelSet(models.Model):
    """A named grouping of channels shared via a single secret token."""
    # NOTE: this is referred to as "channel collections" on the front-end, but we need to call it
    # something else as there is already a ChannelCollection model on the front-end
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    name = models.CharField(max_length=200, blank=True)
    description = models.CharField(max_length=400, blank=True)
    public = models.BooleanField(default=False, db_index=True)
    editors = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='channel_sets',
        verbose_name="editors",
        help_text="Users with edit rights",
        blank=True,
    )
    secret_token = models.ForeignKey('SecretToken', null=True, blank=True, related_name='channel_sets', on_delete=models.SET_NULL)

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict *queryset* to channel sets *user* can edit; admins see all."""
        if user.is_anonymous:
            return queryset.none()
        user_id = not user.is_anonymous and user.id
        edit = Exists(User.channel_sets.through.objects.filter(user_id=user_id, channelset_id=OuterRef("id")))
        queryset = queryset.annotate(edit=edit)
        if user.is_admin:
            return queryset
        return queryset.filter(edit=True)

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Channel sets are only visible to users who can edit them."""
        return cls.filter_edit_queryset(queryset, user)

    def get_channels(self):
        """Return the set's non-deleted channels, or None if no token exists yet."""
        if self.secret_token:
            return self.secret_token.channels.filter(deleted=False)

    def save(self, *args, **kwargs):
        """Ensure a secret token exists on first save, then persist."""
        if self._state.adding:
            self.on_create()
        # Bug fix: previously called super().save() without forwarding
        # *args/**kwargs, silently dropping options such as update_fields
        # (and inconsistent with Channel.save).
        super(ChannelSet, self).save(*args, **kwargs)

    def on_create(self):
        # Lazily create the secret token that groups this set's channels.
        if not self.secret_token:
            self.secret_token = SecretToken.objects.create(token=SecretToken.generate_new_token())

    def delete(self, *args, **kwargs):
        """Delete the set, then clean up its now-orphaned secret token."""
        super(ChannelSet, self).delete(*args, **kwargs)

        if self.secret_token:
            self.secret_token.delete()
class ContentTag(models.Model):
    """A free-form tag, unique per channel, that can be applied to content nodes."""
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    tag_name = models.CharField(max_length=50)
    channel = models.ForeignKey('Channel', related_name='tags', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)

    objects = CustomManager()

    def __str__(self):
        return self.tag_name

    class Meta:
        unique_together = ['tag_name', 'channel']
class License(models.Model):
    """
    Normalize the license of ContentNode model
    """
    license_name = models.CharField(max_length=50)
    license_url = models.URLField(blank=True)
    license_description = models.TextField(blank=True)
    copyright_holder_required = models.BooleanField(default=True)
    is_custom = models.BooleanField(default=False)
    exists = models.BooleanField(
        default=False,
        verbose_name="license exists",
        help_text="Tells whether or not a content item is licensed to share",
    )

    @classmethod
    def validate_name(cls, name):
        """Raise ValidationError when no license named *name* exists."""
        # exists() lets the database stop at the first match instead of
        # counting every row (count() == 0 forced a full count).
        if not cls.objects.filter(license_name=name).exists():
            raise ValidationError('License `{}` does not exist'.format(name))

    def __str__(self):
        return self.license_name
# Explicit names for ContentNode DB indexes (see ContentNode's model Meta).
NODE_ID_INDEX_NAME = "node_id_idx"
NODE_MODIFIED_INDEX_NAME = "node_modified_idx"
NODE_MODIFIED_DESC_INDEX_NAME = "node_modified_desc_idx"
# Cache key template mapping a ContentNode pk to its MPTT tree_id;
# used by ContentNode.filter_by_pk when the table is partitioned.
CONTENTNODE_TREE_ID_CACHE_KEY = "contentnode_{pk}__tree_id"
class ContentNode(MPTTModel, models.Model):
    """
    By default, all nodes have a title and can be used as a topic.
    """
    # Random id used internally on Studio (See `node_id` for id used in Kolibri)
    id = UUIDField(primary_key=True, default=uuid.uuid4)

    # the content_id is used for tracking a user's interaction with a piece of
    # content, in the face of possibly many copies of that content. When a user
    # interacts with a piece of content, all substantially similar pieces of
    # content should be marked as such as well. We track these "substantially
    # similar" types of content by having them have the same content_id.
    content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False, db_index=True)
    # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta
    node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)

    # TODO: disallow nulls once existing models have been set
    original_channel_id = UUIDField(primary_key=False, editable=False, null=True,
                                    db_index=True)  # Original channel copied from
    source_channel_id = UUIDField(primary_key=False, editable=False, null=True)  # Immediate channel copied from
    # Original node_id of node copied from (TODO: original_node_id clashes with original_node field - temporary)
    original_source_node_id = UUIDField(primary_key=False, editable=False, null=True,
                                        db_index=True)
    source_node_id = UUIDField(primary_key=False, editable=False, null=True)  # Immediate node_id of node copied from

    # Fields specific to content generated by Ricecooker
    source_id = models.CharField(max_length=200, blank=True, null=True)
    source_domain = models.CharField(max_length=300, blank=True, null=True)

    title = models.CharField(max_length=200, blank=True)
    description = models.TextField(blank=True)
    kind = models.ForeignKey('ContentKind', related_name='contentnodes', db_index=True, null=True, blank=True, on_delete=models.SET_NULL)
    license = models.ForeignKey('License', null=True, blank=True, on_delete=models.SET_NULL)
    license_description = models.CharField(max_length=400, null=True, blank=True)
    prerequisite = models.ManyToManyField('self', related_name='is_prerequisite_of',
                                          through='PrerequisiteContentRelationship', symmetrical=False, blank=True)
    is_related = models.ManyToManyField('self', related_name='relate_to', through='RelatedContentRelationship',
                                        symmetrical=False, blank=True)
    language = models.ForeignKey('Language', null=True, blank=True, related_name='content_language', on_delete=models.SET_NULL)
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True, on_delete=models.CASCADE)
    tags = models.ManyToManyField(ContentTag, symmetrical=False, related_name='tagged_content', blank=True)
    # No longer used
    sort_order = models.FloatField(max_length=50, default=1, verbose_name="sort order",
                                   help_text="Ascending, lowest number shown first")
    copyright_holder = models.CharField(max_length=200, null=True, blank=True, default="",
                                        help_text="Organization of person who holds the essential rights")
    # legacy field...
    original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='duplicates')
    cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='clones')

    thumbnail_encoding = models.TextField(blank=True, null=True)

    created = models.DateTimeField(default=timezone.now, verbose_name="created")
    modified = models.DateTimeField(auto_now=True, verbose_name="modified")
    published = models.BooleanField(default=False)
    publishing = models.BooleanField(default=False)
    complete = models.BooleanField(null=True)
    changed = models.BooleanField(default=True)
    """
    Extra fields for exercises:
    - type: mastery model to use to determine completion
    - m: m value for M out of N mastery criteria
    - n: n value for M out of N mastery criteria
    """
    extra_fields = JSONField(default=dict, blank=True, null=True)
    author = models.CharField(max_length=200, blank=True, default="", help_text="Who created this content?",
                              null=True)
    aggregator = models.CharField(max_length=200, blank=True, default="", help_text="Who gathered this content together?",
                                  null=True)
    provider = models.CharField(max_length=200, blank=True, default="", help_text="Who distributed this content?",
                                null=True)

    role_visibility = models.CharField(max_length=50, choices=roles.choices, default=roles.LEARNER)
    freeze_authoring_data = models.BooleanField(default=False)

    # Fields for metadata labels
    # These fields use a map to store applied labels
    # {
    #   "<label_id1>": true,
    #   "<label_id2>": true,
    # }
    grade_levels = models.JSONField(blank=True, null=True)
    resource_types = models.JSONField(blank=True, null=True)
    learning_activities = models.JSONField(blank=True, null=True)
    accessibility_labels = models.JSONField(blank=True, null=True)
    categories = models.JSONField(blank=True, null=True)
    learner_needs = models.JSONField(blank=True, null=True)

    # A field for storing a suggested duration for the content node
    # this duration should be in seconds.
    suggested_duration = models.IntegerField(blank=True, null=True, help_text="Suggested duration for the content node (in seconds)")

    objects = CustomContentNodeTreeManager()

    # Track all updates and ignore a blacklist of attributes
    # when we check for changes
    _field_updates = FieldTracker()

    # Restricts permission-CTE joins to rows belonging to the same MPTT tree.
    _permission_filter = Q(tree_id=OuterRef("tree_id"))
@classmethod
def _annotate_channel_id(cls, queryset):
# Annotate channel id
return queryset.annotate(
channel_id=Subquery(
Channel.objects.filter(
main_tree__tree_id=OuterRef("tree_id")
).values_list("id", flat=True)[:1]
)
)
    @classmethod
    def filter_by_pk(cls, pk):
        """
        When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always
        returns a queryset filtered by pk.

        When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode
        for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. If
        a ContentNode does not exist for `pk` then an empty queryset is returned.
        """
        query = ContentNode.objects.filter(pk=pk)

        if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:
            # Including tree_id in the filter lets the partitioned table prune
            # to a single partition; the pk->tree_id mapping is cached.
            tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))
            if tree_id:
                query = query.filter(tree_id=tree_id)
            else:
                tree_id = ContentNode.objects.filter(pk=pk).values_list("tree_id", flat=True).first()
                if tree_id:
                    # Cached with no timeout.
                    # NOTE(review): confirm a node's tree_id cannot change
                    # (e.g. when moved between trees), otherwise this entry
                    # could go stale.
                    cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk), tree_id, None)
                    query = query.filter(tree_id=tree_id)
                else:
                    # No node with this pk: return an empty queryset instead
                    # of an unpartitioned pk-only scan.
                    query = query.none()

        return query
@classmethod
def filter_edit_queryset(cls, queryset, user):
user_id = not user.is_anonymous and user.id
if not user_id:
return queryset.none()
edit_cte = PermissionCTE.editable_channels(user_id)
queryset = queryset.with_cte(edit_cte).annotate(
edit=edit_cte.exists(cls._permission_filter),
)
if user.is_admin:
return queryset
return queryset.filter(edit=True)
    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict *queryset* to nodes *user* may view: nodes in public
        channels for everyone, plus edit/view-granted channels for signed-in
        users; admins see everything (still annotated with edit/view/public)."""
        user_id = not user.is_anonymous and user.id

        queryset = queryset.annotate(
            public=Exists(
                Channel.objects.filter(
                    public=True, main_tree__tree_id=OuterRef("tree_id")
                ).values("pk")
            ),
        )

        if not user_id:
            # Anonymous users only see nodes belonging to public channels.
            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)

        edit_cte = PermissionCTE.editable_channels(user_id)
        view_cte = PermissionCTE.view_only_channels(user_id)

        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
            view=view_cte.exists(cls._permission_filter),
        )

        if user.is_admin:
            return queryset

        return queryset.filter(
            Q(view=True)
            | Q(edit=True)
            | Q(public=True)
        )
@raise_if_unsaved
def get_root(self):
# Only topics can be root nodes
if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
return self
return super(ContentNode, self).get_root()
    @raise_if_unsaved
    def get_root_id(self):
        """Return the pk of this node's tree root.

        NOTE(review): for a non-topic root node this returns ``self`` (the
        node object) rather than its pk, mirroring get_root() — confirm
        whether callers rely on that or whether ``self.id`` was intended.
        """
        # Only topics can be root nodes
        if self.is_root_node() and self.kind_id != content_kinds.TOPIC:
            return self
        return ContentNode.objects.values_list('pk', flat=True).get(
            tree_id=self._mpttfield('tree_id'),
            parent=None,
        )
def get_tree_data(self, levels=float('inf')):
"""
Returns `levels`-deep tree information starting at current node.
Args:
levels (int): depth of tree hierarchy to return
Returns:
tree (dict): starting with self, with children list containing either
the just the children's `node_id`s or full recusive tree.
"""
if self.kind_id == content_kinds.TOPIC:
node_data = {
"title": self.title,
"kind": self.kind_id,
"node_id": self.node_id,
"studio_id": self.id,
}
children = self.children.all()
if levels > 0:
node_data["children"] = [c.get_tree_data(levels=levels - 1) for c in children]
return node_data
if self.kind_id == content_kinds.EXERCISE:
return {
"title": self.title,
"kind": self.kind_id,
"count": self.assessment_items.count(),
"node_id": self.node_id,
"studio_id": self.id,
}
return {
"title": self.title,
"kind": self.kind_id,
"file_size": self.files.values('file_size').aggregate(size=Sum('file_size'))['size'],
"node_id": self.node_id,
"studio_id": self.id,
}
    def get_original_node(self):
        """Return the node this one was originally copied from, falling back to self."""
        original_node = self.original_node or self
        if self.original_channel_id and self.original_source_node_id:
            # Look the original up in its source channel's main tree: first by
            # the recorded node_id, then by shared content_id, else keep self.
            original_tree_id = Channel.objects.select_related("main_tree").get(pk=self.original_channel_id).main_tree.tree_id
            original_node = ContentNode.objects.filter(tree_id=original_tree_id, node_id=self.original_source_node_id).first() or \
                ContentNode.objects.filter(tree_id=original_tree_id, content_id=self.content_id).first() or self
        return original_node
def get_associated_presets(self):
key = "associated_presets_{}".format(self.kind_id)
cached_data = cache.get(key)
if cached_data:
return cached_data
presets = list(FormatPreset.objects.filter(kind=self.kind).values())
cache.set(key, presets, None)
return presets
def get_prerequisites(self):
prerequisite_mapping = {}
prerequisites = self.prerequisite.all()
prereqlist = list(prerequisites)
for prereq in prerequisites:
prlist, prereqmapping = prereq.get_prerequisites()
prerequisite_mapping.update({prereq.pk: prereqmapping})
prereqlist.extend(prlist)
return prereqlist, prerequisite_mapping
def get_postrequisites(self):
postrequisite_mapping = {}
postrequisites = self.is_prerequisite_of.all()
postreqlist = list(postrequisites)
for postreq in postrequisites:
prlist, postreqmapping = postreq.get_postrequisites()
postrequisite_mapping.update({postreq.pk: postreqmapping})
postreqlist.extend(prlist)
return postreqlist, postrequisite_mapping
def get_channel_id(self):
if hasattr(self, "channel_id"):
return self.channel_id
channel = self.get_channel()
if channel:
return channel.id
return None
    def get_channel(self):
        """Return the Channel owning one of whose trees contains this node, or None."""
        try:
            root = self.get_root()
            if not root:
                return None
            return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(previous_tree=root)).first()
        except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):
            # Orphaned or clipboard nodes have no channel; treat lookup
            # failures as "no channel" rather than propagating.
            return None
def get_thumbnail(self):
# Problems with json.loads, so use ast.literal_eval to get dict
if self.thumbnail_encoding:
thumbnail_data = load_json_string(self.thumbnail_encoding)
if type(thumbnail_data) is dict and thumbnail_data.get("base64"):
return thumbnail_data["base64"]
thumbnail = self.files.filter(preset__thumbnail=True).first()
if thumbnail:
return generate_storage_url(str(thumbnail))
return ""
@classmethod
def get_nodes_with_title(cls, title, limit_to_children_of=None):
"""
Returns all ContentNodes with a given title. If limit_to_children_of
is passed in with an id, only look at all the children of the node with that id.
"""
if limit_to_children_of:
root = cls.objects.get(id=limit_to_children_of)
return root.get_descendants().filter(title=title)
return cls.objects.filter(title=title)
    def get_details(self, channel_id=None):
        """
        Returns information about the node and its children, including total size, languages, files, etc.

        :param channel_id: optional channel id to attribute the stats to; when
            omitted, this node's own channel is looked up.
        :return: A dictionary with detailed statistics and information about the node.
        """
        from contentcuration.viewsets.common import SQArrayAgg
        from contentcuration.viewsets.common import SQCount
        from contentcuration.viewsets.common import SQRelatedArrayAgg
        from contentcuration.viewsets.common import SQSum
        from contentcuration.viewsets.common import SQJSONBKeyArrayAgg

        node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id).order_by()

        descendants = (
            self.get_descendants()
            .values("id")
        )

        if channel_id:
            channel = Channel.objects.filter(id=channel_id)[0]
        else:
            channel = self.get_channel()

        # Empty subtree: cache and return an all-zero summary immediately.
        if not descendants.exists():
            data = {
                "last_update": pytz.utc.localize(datetime.now()).strftime(
                    settings.DATE_TIME_FORMAT
                ),
                "created": self.created.strftime(settings.DATE_TIME_FORMAT),
                "resource_count": 0,
                "resource_size": 0,
                "includes": {"coach_content": 0, "exercises": 0},
                "kind_count": [],
                "languages": [],
                "accessible_languages": [],
                "licenses": [],
                "tags": [],
                "copyright_holders": [],
                "authors": [],
                "aggregators": [],
                "providers": [],
                "sample_pathway": [],
                "original_channels": [],
                "sample_nodes": [],
                "levels": [],
                "categories": [],
            }

            # Set cache with latest data
            cache.set("details_{}".format(self.node_id), json.dumps(data), None)
            return data

        # Get resources (all non-topic descendants)
        resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()

        # CTE over the resources' files, deduplicated by checksum so shared
        # files are only counted once toward total size.
        nodes = With(
            File.objects.filter(contentnode_id__in=Subquery(resources.values("id")))
            .values("checksum", "file_size")
            .order_by(),
            name="nodes",
        )
        file_query = (
            nodes.queryset().with_cte(nodes).values("checksum", "file_size").distinct()
        )

        # CTE used to find languages for which subtitle files exist.
        l_nodes = With(
            File.objects.filter(contentnode_id__in=Subquery(resources.values("id")))
            .values("language_id", "preset_id")
            .order_by(),
            name="l_nodes",
        )
        accessible_languages_query = (
            l_nodes.queryset()
            .filter(preset_id=format_presets.VIDEO_SUBTITLE)
            .with_cte(l_nodes)
            .values("language__native_name")
            .distinct()
        )

        # NOTE(review): the .replace("topic", "'topic'") below patches the raw
        # SQL string so the bare kind literal is quoted — fragile; confirm it
        # cannot match unintended occurrences in these queries.
        tags_query = str(
            ContentTag.objects.filter(
                tagged_content__pk__in=descendants.values_list("pk", flat=True)
            )
            .values("tag_name")
            .annotate(count=Count("tag_name"))
            .query
        ).replace("topic", "'topic'")
        kind_count_query = str(
            resources.values("kind_id").annotate(count=Count("kind_id")).query
        ).replace("topic", "'topic'")

        # Attach every aggregate as a subquery annotation on the single node row.
        node = node.annotate(
            resource_count=SQCount(resources, field="id"),
            resource_size=SQSum(file_query, field="file_size"),
            copyright_holders=SQArrayAgg(
                resources.distinct("copyright_holder").order_by("copyright_holder"),
                field="copyright_holder",
            ),
            authors=SQArrayAgg(
                resources.distinct("author").order_by("author"), field="author"
            ),
            aggregators=SQArrayAgg(
                resources.distinct("aggregator").order_by("aggregator"),
                field="aggregator",
            ),
            providers=SQArrayAgg(
                resources.distinct("provider").order_by("provider"), field="provider"
            ),
            languages=SQRelatedArrayAgg(
                descendants.exclude(language=None)
                .distinct("language__native_name")
                .order_by(),
                field="language__native_name",
                fieldname="native_name",
            ),
            accessible_languages=SQRelatedArrayAgg(
                accessible_languages_query,
                field="language__native_name",
                fieldname="native_name",
            ),
            licenses=SQRelatedArrayAgg(
                resources.exclude(license=None)
                .distinct("license__license_name")
                .order_by("license__license_name"),
                field="license__license_name",
                fieldname="license_name",
            ),
            kind_count=RawSQL(
                "SELECT json_agg(row_to_json (x)) FROM ({}) as x".format(
                    kind_count_query
                ),
                (),
            ),
            tags_list=RawSQL(
                "SELECT json_agg(row_to_json (x)) FROM ({}) as x".format(tags_query), ()
            ),
            coach_content=SQCount(
                resources.filter(role_visibility=roles.COACH), field="id"
            ),
            exercises=SQCount(
                resources.filter(kind_id=content_kinds.EXERCISE), field="id"
            ),
            levels=SQJSONBKeyArrayAgg(
                descendants.exclude(grade_levels__isnull=True),
                field="grade_levels",
            ),
            all_categories=SQJSONBKeyArrayAgg(
                descendants.exclude(categories__isnull=True),
                field="categories",
            ),
        )

        # Get sample pathway by getting longest path
        # Using resources.aggregate adds a lot of time, use values that have already been fetched
        max_level = max(
            resources.values_list("level", flat=True).order_by().distinct() or [0]
        )
        m_nodes = With(
            resources.values("id", "level", "tree_id", "lft").order_by(),
            name="m_nodes",
        )
        deepest_node_record = (
            m_nodes.queryset()
            .with_cte(m_nodes)
            .filter(level=max_level)
            .values("id")
            .order_by("tree_id", "lft")
            .first()
        )
        if deepest_node_record:
            deepest_node = ContentNode.objects.get(pk=deepest_node_record["id"])
        # Ancestor chain of the deepest node, excluding the root, as the sample pathway.
        pathway = (
            list(
                deepest_node.get_ancestors()
                .order_by()
                .exclude(parent=None)
                .values("title", "node_id", "kind_id")
                .order_by()
            )
            if deepest_node_record
            else []
        )
        # Up to four siblings of the deepest node serve as sample nodes.
        sample_nodes = (
            [
                {
                    "node_id": n.node_id,
                    "title": n.title,
                    "description": n.description,
                    "thumbnail": n.get_thumbnail(),
                    "kind": n.kind_id,
                }
                for n in deepest_node.get_siblings(include_self=True)[0:4]
            ]
            if deepest_node_record
            else []
        )

        # Get list of channels nodes were originally imported from (omitting the current channel)
        channel_id = channel and channel.id
        originals = (
            resources.values("original_channel_id")
            .annotate(count=Count("original_channel_id"))
            .order_by("original_channel_id")
        )
        originals = {c["original_channel_id"]: c["count"] for c in originals}
        original_channels = (
            Channel.objects.exclude(pk=channel_id)
            .filter(pk__in=originals.keys(), deleted=False)
            .order_by()
        )
        original_channels = [
            {
                "id": c.id,
                "name": "{}{}".format(
                    c.name, _(" (Original)") if channel_id == c.id else ""
                ),
                "thumbnail": c.get_thumbnail(),
                "count": originals[c.id],
            }
            for c in original_channels
        ]

        # Execute the annotated query and pull out the single result row.
        node = (
            node.order_by()
            .values(
                "id",
                "resource_count",
                "resource_size",
                "copyright_holders",
                "authors",
                "aggregators",
                "providers",
                "languages",
                "accessible_languages",
                "coach_content",
                "licenses",
                "tags_list",
                "kind_count",
                "exercises",
                "levels",
                "all_categories",
            )
            .first()
        )

        for_educators = {
            "coach_content": node["coach_content"],
            "exercises": node["exercises"],
        }

        # Serialize data
        data = {
            "last_update": pytz.utc.localize(datetime.now()).strftime(
                settings.DATE_TIME_FORMAT
            ),
            "created": self.created.strftime(settings.DATE_TIME_FORMAT),
            "resource_count": node.get("resource_count", 0),
            "resource_size": node.get("resource_size", 0),
            "includes": for_educators,
            "kind_count": node.get("kind_count") or [],
            "languages": node.get("languages") or [],
            "accessible_languages": node.get("accessible_languages") or [],
            "licenses": node.get("licenses") or [],
            "tags": node.get("tags_list") or [],
            "original_channels": original_channels,
            "sample_pathway": pathway,
            "sample_nodes": sample_nodes,
            # source model fields for the below default to an empty string, but can also be null
            "authors": list(filter(bool, node["authors"])),
            "aggregators": list(filter(bool, node["aggregators"])),
            "providers": list(filter(bool, node["providers"])),
            "copyright_holders": list(filter(bool, node["copyright_holders"])),
            "levels": node.get("levels") or [],
            "categories": node.get("all_categories") or [],
        }

        # Set cache with latest data
        cache.set("details_{}".format(self.node_id), json.dumps(data), None)
        return data
def has_changes(self):
mptt_opts = self._mptt_meta
# Ignore fields that are used for dirty tracking, and also mptt fields, as changes to these are tracked in mptt manager methods.
blacklist = set([
'changed',
'modified',
'publishing',
mptt_opts.tree_id_attr,
mptt_opts.left_attr,
mptt_opts.right_attr,
mptt_opts.level_attr,
])
original_values = self._field_updates.changed()
return any((True for field in original_values if field not in blacklist))
def recalculate_editors_storage(self):
from contentcuration.utils.user import calculate_user_storage
for editor in self.files.values_list('uploaded_by_id', flat=True).distinct():
calculate_user_storage(editor)
def mark_complete(self): # noqa C901
errors = []
# Is complete if title is falsy but only if not a root node.
if not (bool(self.title) or self.parent_id is None):
errors.append("Empty title")
if self.kind_id != content_kinds.TOPIC:
if not self.license:
errors.append("Missing license")
if self.license and self.license.is_custom and not self.license_description:
errors.append("Missing license description for custom license")
if self.license and self.license.copyright_holder_required and not self.copyright_holder:
errors.append("Missing required copyright holder")
if self.kind_id != content_kinds.EXERCISE and not self.files.filter(preset__supplementary=False).exists():
errors.append("Missing default file")
if self.kind_id == content_kinds.EXERCISE:
# Check to see if the exercise has at least one assessment item that has:
if not self.assessment_items.filter(
# Item with non-blank raw data
~Q(raw_data="") | (
# A non-blank question
~Q(question='')
# Non-blank answers
& ~Q(answers='[]')
# With either an input question or one answer marked as correct
& (Q(type=exercises.INPUT_QUESTION) | Q(answers__iregex=r'"correct":\s*true'))
)
).exists():
errors.append("No questions with question text and complete answers")
# Check that it has a mastery model set
# Either check for the previous location for the mastery model, or rely on our completion criteria validation
# that if it has been set, then it has been set correctly.
criterion = self.extra_fields.get("options", {}).get("completion_criteria")
if not (self.extra_fields.get("mastery_model") or criterion):
errors.append("Missing mastery criterion")
if criterion:
try:
completion_criteria.validate(criterion, kind=content_kinds.EXERCISE)
except completion_criteria.ValidationError:
errors.append("Mastery criterion is defined but is invalid")
self.complete = not errors
return errors
def make_content_id_unique(self):
"""
If self is NOT an original contentnode (in other words, a copied contentnode)
and a contentnode with same content_id exists then we update self's content_id.
"""
is_node_original = self.original_source_node_id is None or self.original_source_node_id == self.node_id
node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(content_id=self.content_id)
if (not is_node_original) and node_same_content_id.exists():
ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.uuid4().hex)
    def on_create(self):
        """Bookkeeping run when the node is first saved."""
        # New nodes are always flagged as changed so downstream publish/diff logic sees them.
        self.changed = True
        self.recalculate_editors_storage()
        self.set_default_learning_activity()
    def on_update(self):
        """Bookkeeping run on every subsequent save: keep an existing dirty flag, else recompute it."""
        self.changed = self.changed or self.has_changes()
    def move_to(self, target, *args, **kwargs):
        """Move this node under ``target``, keeping the tree-id cache and storage accounting in sync."""
        # Capture whether we are moving *out of* a trash tree before the move happens.
        parent_was_trashtree = self.parent.channel_trash.exists()
        super(ContentNode, self).move_to(target, *args, **kwargs)
        self.save()

        # Update tree_id cache when node is moved to another tree
        cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.tree_id, None)

        # Recalculate storage if node was moved to or from the trash tree
        if target.channel_trash.exists() or parent_was_trashtree:
            self.recalculate_editors_storage()
def set_default_learning_activity(self):
if self.learning_activities is None:
if self.kind in kind_activity_map:
self.learning_activities = {
kind_activity_map[self.kind]: True
}
    def save(self, skip_lock=False, *args, **kwargs):
        """
        Save the node, acquiring an MPTT tree lock only when the parent changed.

        ``skip_lock`` allows callers that already hold the relevant tree lock
        to skip re-acquiring it.
        """
        if self._state.adding:
            self.on_create()
        else:
            self.on_update()

        # Logic borrowed from mptt - do a simple check to see if we have changed
        # the parent of the node. We use the mptt specific cached fields here
        # because these get updated by the mptt move methods, and so will be up to
        # date, meaning we can avoid locking the DB twice when the fields have already
        # been updated in the database.

        # If most moves are being done independently of just changing the parent
        # and then calling a save, locking within the save method itself should rarely
        # be triggered - meaning updates to contentnode metadata should only rarely
        # trigger a write lock on mptt fields.
        old_parent_id = self._field_updates.changed().get("parent_id")
        if self._state.adding and (self.parent_id or self.parent):
            same_order = False
        # NOTE(review): this compares against the DeferredAttribute *class* itself --
        # presumably the tracker reports that sentinel when parent_id was never
        # loaded; confirm against the FieldTracker implementation.
        elif old_parent_id is DeferredAttribute:
            same_order = True
        else:
            same_order = old_parent_id == self.parent_id

        if not same_order:
            changed_ids = list(filter(lambda x: x is not None, set([old_parent_id, self.parent_id])))
        else:
            changed_ids = []

        if not same_order and not skip_lock:
            # Lock the mptt fields for the trees of the old and new parent
            with ContentNode.objects.lock_mptt(*ContentNode.objects
                                               .filter(id__in=[pid for pid in [old_parent_id, self.parent_id] if pid])
                                               .values_list('tree_id', flat=True).distinct()):
                super(ContentNode, self).save(*args, **kwargs)
                # Always write to the database for the parent change updates, as we have
                # no persistent object references for the original and new parent to modify
                if changed_ids:
                    ContentNode.objects.filter(id__in=changed_ids).update(changed=True)
        else:
            super(ContentNode, self).save(*args, **kwargs)
            # Always write to the database for the parent change updates, as we have
            # no persistent object references for the original and new parent to modify
            if changed_ids:
                ContentNode.objects.filter(id__in=changed_ids).update(changed=True)

    # Copied from MPTT
    save.alters_data = True
    def delete(self, *args, **kwargs):
        """Delete this node under an MPTT tree lock, flagging its parent as changed."""
        # The in-memory parent may already have been cleared; fall back to the
        # tracker's record of the previous parent.
        parent = self.parent or self._field_updates.changed().get('parent')
        if parent:
            parent.changed = True
            parent.save()

        self.recalculate_editors_storage()

        # Lock the mptt fields for the tree of this node
        with ContentNode.objects.lock_mptt(self.tree_id):
            return super(ContentNode, self).delete(*args, **kwargs)

    # Copied from MPTT
    delete.alters_data = True
def copy_to(
self,
target=None,
position="last-child",
pk=None,
mods=None,
excluded_descendants=None,
can_edit_source_channel=None,
batch_size=None,
progress_tracker=None
):
return self._tree_manager.copy_node(self, target, position, pk, mods, excluded_descendants, can_edit_source_channel, batch_size, progress_tracker)[0]
    def copy(self):
        """Convenience wrapper: copy this node with all-default options."""
        return self.copy_to()
def is_publishable(self):
return self.complete and self.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists()
    class Meta:
        verbose_name = "Topic"
        verbose_name_plural = "Topics"
        # Do not allow two nodes with the same name on the same level
        # unique_together = ('parent', 'title')
        # Explicitly named indexes so migrations stay stable across renames.
        indexes = [
            models.Index(fields=["node_id"], name=NODE_ID_INDEX_NAME),
            models.Index(fields=["-modified"], name=NODE_MODIFIED_DESC_INDEX_NAME),
        ]
class ContentKind(models.Model):
    """Lookup table of supported content kinds (topic, video, exercise, ...)."""
    kind = models.CharField(primary_key=True, max_length=200, choices=content_kinds.choices)

    def __str__(self):
        return self.kind
class FileFormat(models.Model):
    """Lookup table of supported file formats, keyed by file extension."""
    extension = models.CharField(primary_key=True, max_length=40, choices=file_formats.choices)
    mimetype = models.CharField(max_length=200, blank=True)

    def __str__(self):
        return self.extension
class FormatPreset(models.Model):
    """Describes the role a file plays for a content item (e.g. high-res video, thumbnail, subtitle)."""
    id = models.CharField(primary_key=True, max_length=150, choices=format_presets.choices)
    readable_name = models.CharField(max_length=400)
    multi_language = models.BooleanField(default=False)
    supplementary = models.BooleanField(default=False)
    thumbnail = models.BooleanField(default=False)
    subtitle = models.BooleanField(default=False)
    display = models.BooleanField(default=True)  # Render on client side
    order = models.IntegerField(default=0)
    kind = models.ForeignKey(ContentKind, related_name='format_presets', null=True, on_delete=models.SET_NULL)
    allowed_formats = models.ManyToManyField(FileFormat, blank=True)

    def __str__(self):
        return self.id

    @classmethod
    def guess_format_preset(cls, filename):
        """
        Guess the format preset of a filename based on its extension.
        Return None if format is unknown.
        """
        extension = os.path.splitext(filename)[1].lstrip(".")
        candidates = FormatPreset.objects.filter(
            allowed_formats__extension=extension,
            display=True,
        )
        return candidates.first()

    @classmethod
    def get_preset(cls, preset_name):
        """
        Get the FormatPreset object with that exact name.
        Returns None if that format preset is not found.
        """
        try:
            return FormatPreset.objects.get(id=preset_name)
        except FormatPreset.DoesNotExist:
            return None
class Language(models.Model):
    """A language entry, optionally qualified by a subcode (e.g. "en-GB")."""
    id = models.CharField(max_length=14, primary_key=True)
    lang_code = models.CharField(max_length=3, db_index=True)
    lang_subcode = models.CharField(max_length=10, db_index=True, blank=True, null=True)
    readable_name = models.CharField(max_length=100, blank=True)
    native_name = models.CharField(max_length=100, blank=True)
    lang_direction = models.CharField(max_length=3, choices=languages.LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])

    def ietf_name(self):
        """Return the IETF tag: "code-subcode" when a subcode exists, otherwise the bare code."""
        if not self.lang_subcode:
            return self.lang_code
        return "{code}-{subcode}".format(code=self.lang_code, subcode=self.lang_subcode)

    def __str__(self):
        return self.ietf_name()
ASSESSMENT_ID_INDEX_NAME = "assessment_id_idx"
class AssessmentItem(models.Model):
    """A single question belonging to an exercise ContentNode."""
    type = models.CharField(max_length=50, default="multiplechoice")
    question = models.TextField(blank=True)
    # hints and answers are JSON-encoded lists stored as text.
    hints = models.TextField(default="[]")
    answers = models.TextField(default="[]")
    order = models.IntegerField(default=1)
    contentnode = models.ForeignKey('ContentNode', related_name="assessment_items", blank=True, null=True,
                                    db_index=True, on_delete=models.CASCADE)
    # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta
    assessment_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
    raw_data = models.TextField(blank=True)
    source_url = models.CharField(max_length=400, blank=True, null=True)
    randomize = models.BooleanField(default=False)
    deleted = models.BooleanField(default=False)

    objects = CustomManager()

    # Track all updates
    _field_updates = FieldTracker()

    def has_changes(self):
        # True when any tracked field differs from its last-saved value.
        return bool(self._field_updates.changed())

    class Meta:
        indexes = [
            models.Index(fields=["assessment_id"], name=ASSESSMENT_ID_INDEX_NAME),
        ]
        unique_together = ['contentnode', 'assessment_id']

    # Permission checks are scoped by the tree of the owning contentnode.
    _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id"))

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict ``queryset`` to items the user can edit; admins see everything."""
        user_id = not user.is_anonymous and user.id
        if not user_id:
            return queryset.none()

        edit_cte = PermissionCTE.editable_channels(user_id)
        queryset = queryset.with_cte(edit_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
        )

        if user.is_admin:
            return queryset

        return queryset.filter(edit=True)

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict ``queryset`` to items the user can view, including items in public channels."""
        user_id = not user.is_anonymous and user.id

        queryset = queryset.annotate(
            public=Exists(
                Channel.objects.filter(
                    public=True, main_tree__tree_id=OuterRef("contentnode__tree_id")
                ).values("pk")
            ),
        )

        if not user_id:
            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)

        edit_cte = PermissionCTE.editable_channels(user_id)
        view_cte = PermissionCTE.view_only_channels(user_id)

        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
            view=view_cte.exists(cls._permission_filter),
        )

        if user.is_admin:
            return queryset

        return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))

    def on_create(self):
        """
        When an exercise is added to a contentnode, update its content_id
        if it's a copied contentnode.
        """
        self.contentnode.make_content_id_unique()

    def on_update(self):
        """
        When an exercise is updated of a contentnode, update its content_id
        if it's a copied contentnode.
        """
        self.contentnode.make_content_id_unique()

    def delete(self, *args, **kwargs):
        """
        When an exercise is deleted from a contentnode, update its content_id
        if it's a copied contentnode.
        """
        self.contentnode.make_content_id_unique()
        return super(AssessmentItem, self).delete(*args, **kwargs)
class SlideshowSlide(models.Model):
    """A single slide belonging to a slideshow ContentNode."""
    contentnode = models.ForeignKey('ContentNode', related_name="slideshow_slides", blank=True, null=True,
                                    db_index=True, on_delete=models.CASCADE)
    # Fractional ordering key; slides are presented in ascending sort_order.
    sort_order = models.FloatField(default=1.0)
    metadata = JSONField(default=dict)
class StagedFile(models.Model):
    """
    Keeps track of files uploaded through Ricecooker to avoid user going over disk quota limit
    """
    checksum = models.CharField(max_length=400, blank=True, db_index=True)
    # Size in bytes; nullable because it may not be known at upload time.
    file_size = models.IntegerField(blank=True, null=True)
    uploaded_by = models.ForeignKey(User, related_name='staged_files', blank=True, null=True, on_delete=models.CASCADE)
FILE_DISTINCT_INDEX_NAME = "file_checksum_file_size_idx"
FILE_MODIFIED_DESC_INDEX_NAME = "file_modified_desc_idx"
FILE_DURATION_CONSTRAINT = "file_media_duration_int"
MEDIA_PRESETS = [
format_presets.AUDIO,
format_presets.AUDIO_DEPENDENCY,
format_presets.VIDEO_HIGH_RES,
format_presets.VIDEO_LOW_RES,
format_presets.VIDEO_DEPENDENCY,
]
class File(models.Model):
    """
    The bottom layer of the contentDB schema, defines the basic building brick for content.
    Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...
    """
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    # MD5 hex digest of the file contents; computed on save when missing.
    checksum = models.CharField(max_length=400, blank=True, db_index=True)
    file_size = models.IntegerField(blank=True, null=True)
    file_on_disk = models.FileField(upload_to=object_storage_name, storage=default_storage, max_length=500,
                                    blank=True)
    contentnode = models.ForeignKey(ContentNode, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
    assessment_item = models.ForeignKey(AssessmentItem, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
    slideshow_slide = models.ForeignKey(SlideshowSlide, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
    file_format = models.ForeignKey(FileFormat, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
    preset = models.ForeignKey(FormatPreset, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)
    language = models.ForeignKey(Language, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)
    original_filename = models.CharField(max_length=255, blank=True)
    source_url = models.CharField(max_length=400, blank=True, null=True)
    uploaded_by = models.ForeignKey(User, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)
    modified = models.DateTimeField(auto_now=True, verbose_name="modified", null=True)
    # Media duration in whole seconds; constrained to media presets (see Meta).
    duration = models.IntegerField(blank=True, null=True)

    objects = CustomManager()

    # A file may hang off a contentnode directly or via an assessment item;
    # permissions are scoped to whichever tree applies.
    _permission_filter = Q(tree_id=OuterRef("contentnode__tree_id")) | Q(tree_id=OuterRef("assessment_item__contentnode__tree_id"))

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Restrict ``queryset`` to files the user can edit; users always own their orphan uploads."""
        user_id = not user.is_anonymous and user.id
        if not user_id:
            return queryset.none()

        cte = PermissionCTE.editable_channels(user_id)
        queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls._permission_filter))

        if user.is_admin:
            return queryset

        return queryset.filter(
            Q(edit=True) | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)
        )

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Restrict ``queryset`` to files the user can view, including public-channel files."""
        user_id = not user.is_anonymous and user.id

        queryset = queryset.annotate(
            public=Exists(
                Channel.objects.filter(public=True).filter(
                    Q(main_tree__tree_id=OuterRef("contentnode__tree_id"))
                    | Q(main_tree__tree_id=OuterRef("assessment_item__contentnode__tree_id"))
                ).values("pk")
            ),
        )

        if not user_id:
            return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)

        edit_cte = PermissionCTE.editable_channels(user_id)
        view_cte = PermissionCTE.view_only_channels(user_id)

        queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(
            edit=edit_cte.exists(cls._permission_filter),
            view=view_cte.exists(cls._permission_filter),
        )

        if user.is_admin:
            return queryset

        return queryset.filter(
            Q(view=True)
            | Q(edit=True)
            | Q(public=True)
            | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)
        )

    class Admin:
        pass

    def __str__(self):
        return '{checksum}{extension}'.format(checksum=self.checksum, extension='.' + self.file_format.extension)

    def filename(self):
        """
        Returns just the filename of the File in storage, without the path
        e.g. abcd.mp4
        """
        # TODO(aron): write tests for this

        return os.path.basename(self.file_on_disk.name)

    def update_contentnode_content_id(self):
        """
        If the file is attached to a contentnode and is not a thumbnail
        then update that contentnode's content_id if it's a copied contentnode.
        """
        if self.contentnode and self.preset.thumbnail is False:
            self.contentnode.make_content_id_unique()

    def on_update(self):
        # since modified was added later as a nullable field to File, we don't use a default but
        # instead we'll just make sure it's always updated through our serializers
        self.modified = timezone.now()
        self.update_contentnode_content_id()

    def save(self, set_by_file_on_disk=True, *args, **kwargs):
        """
        Override the default save method.
        If the file_on_disk FileField gets passed a content copy:
            1. generate the MD5 from the content copy
            2. fill the other fields accordingly

        Also re-queues the uploader's storage-usage calculation after saving.
        """
        from contentcuration.utils.user import calculate_user_storage

        # check if the file format exists in file_formats.choices
        if self.file_format_id:
            if self.file_format_id not in dict(file_formats.choices):
                raise ValidationError("Invalid file_format")

        if set_by_file_on_disk and self.file_on_disk:  # if file_on_disk is supplied, hash out the file
            if self.checksum is None or self.checksum == "":
                md5 = hashlib.md5()
                for chunk in self.file_on_disk.chunks():
                    md5.update(chunk)

                self.checksum = md5.hexdigest()
            if not self.file_size:
                self.file_size = self.file_on_disk.size
            if not self.file_format_id:
                ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')
                if ext in list(dict(file_formats.choices).keys()):
                    self.file_format_id = ext
                else:
                    raise ValueError("Files of type `{}` are not supported.".format(ext))

        super(File, self).save(*args, **kwargs)

        if self.uploaded_by_id:
            calculate_user_storage(self.uploaded_by_id)

    class Meta:
        indexes = [
            models.Index(fields=['checksum', 'file_size'], name=FILE_DISTINCT_INDEX_NAME),
            models.Index(fields=["-modified"], name=FILE_MODIFIED_DESC_INDEX_NAME),
        ]
        constraints = [
            # enforces that duration is null when not a media preset, but the duration may be null for media presets
            # but if not-null, should be greater than 0
            models.CheckConstraint(
                check=(Q(preset__in=MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True)),
                name=FILE_DURATION_CONSTRAINT
            )
        ]
@receiver(models.signals.post_delete, sender=File)
def auto_delete_file_on_delete(sender, instance, **kwargs):
    """
    Signal handler run after a File row is deleted.

    NOTE(review): despite the historical name and previous docstring, this
    handler no longer removes anything from storage -- it only re-queues a
    storage-usage recalculation for the uploading user. Actual storage cleanup
    appears to live in delete_empty_file_reference; confirm before relying on
    this to free disk space.
    """
    # Recalculate storage
    from contentcuration.utils.user import calculate_user_storage

    if instance.uploaded_by_id:
        calculate_user_storage(instance.uploaded_by_id)
def delete_empty_file_reference(checksum, extension):
    """Remove the stored object for ``checksum`` when no File row or Channel thumbnail references it."""
    filename = checksum + '.' + extension
    still_referenced = (
        File.objects.filter(checksum=checksum).exists()
        or Channel.objects.filter(thumbnail=filename).exists()
    )
    if still_referenced:
        return
    storage_path = generate_object_storage_name(checksum, filename)
    if default_storage.exists(storage_path):
        default_storage.delete(storage_path)
class PrerequisiteContentRelationship(models.Model):
    """
    Predefine the prerequisite relationship between two ContentNode objects.
    """
    target_node = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)
    prerequisite = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)

    class Meta:
        unique_together = ['target_node', 'prerequisite']

    def clean(self, *args, **kwargs):
        """Reject self-referencing and directly cyclic prerequisite pairs."""
        # self reference exception
        if self.target_node == self.prerequisite:
            raise IntegrityError('Cannot self reference as prerequisite.')
        # immediate cyclic exception
        if PrerequisiteContentRelationship.objects.using(self._state.db) \
                .filter(target_node=self.prerequisite, prerequisite=self.target_node):
            raise IntegrityError(
                'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'
                % (self.target_node, self.prerequisite))
        # distant cyclic exception
        # elif <this is a nice to have exception, may implement in the future when the priority raises.>
        #   raise Exception('Note: Prerequisite relationship is acyclic! %s and %s forms a closed loop!' % (
        #       self.target_node, self.prerequisite
        #   ))
        super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)

    def save(self, *args, **kwargs):
        # Run full validation (including clean()) on every save so the cycle
        # checks above cannot be bypassed.
        self.full_clean()
        super(PrerequisiteContentRelationship, self).save(*args, **kwargs)

    def __str__(self):
        # Replaces the Python 2-era __unicode__, which is never invoked under
        # Python 3 and left this model with the default Model string repr;
        # now consistent with the other models in this module.
        return '%s' % (self.pk)
class RelatedContentRelationship(models.Model):
    """
    Predefine the related relationship between two ContentNode objects.
    """
    contentnode_1 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_1', on_delete=models.CASCADE)
    contentnode_2 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_2', on_delete=models.CASCADE)

    class Meta:
        unique_together = ['contentnode_1', 'contentnode_2']

    def save(self, *args, **kwargs):
        """Persist the relation, rejecting self references and silently skipping mirrored duplicates."""
        if self.contentnode_1 == self.contentnode_2:
            raise IntegrityError('Cannot self reference as related.')
        # If the mirrored pair already exists the relation is already recorded,
        # so cancel the save without raising.
        mirrored = RelatedContentRelationship.objects.using(self._state.db).filter(
            contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1
        )
        if mirrored:
            return
        super(RelatedContentRelationship, self).save(*args, **kwargs)
class Invitation(models.Model):
    """ Invitation to edit channel """
    id = UUIDField(primary_key=True, default=uuid.uuid4)
    accepted = models.BooleanField(default=False)
    declined = models.BooleanField(default=False)
    revoked = models.BooleanField(default=False)
    invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, related_name='sent_to')
    # Either EDIT_ACCESS or VIEW_ACCESS.
    share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)
    email = models.EmailField(max_length=100, null=True)
    sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='sent_by', null=True, on_delete=models.CASCADE)
    channel = models.ForeignKey('Channel', null=True, related_name='pending_editors', on_delete=models.CASCADE)
    first_name = models.CharField(max_length=100, blank=True)
    last_name = models.CharField(max_length=100, blank=True, null=True)

    class Meta:
        verbose_name = "Invitation"
        verbose_name_plural = "Invitations"

    def accept(self):
        """Grant the invited user the access level of this invitation on the channel."""
        # NOTE(review): user may be None when no account matches the invitation
        # email -- confirm callers guarantee the account exists before accept().
        user = User.objects.filter(email__iexact=self.email).first()
        if self.channel:
            # channel is a nullable field, so check that it exists.
            if self.share_mode == VIEW_ACCESS:
                self.channel.editors.remove(user)
                self.channel.viewers.add(user)
            else:
                self.channel.viewers.remove(user)
                self.channel.editors.add(user)

    @classmethod
    def filter_edit_queryset(cls, queryset, user):
        """Invitations editable by ``user``: their own (as invitee or sender) or on channels they edit."""
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset

        return queryset.filter(
            Q(email__iexact=user.email)
            | Q(sender=user)
            | Q(channel__editors=user)
        ).distinct()

    @classmethod
    def filter_view_queryset(cls, queryset, user):
        """Invitations visible to ``user``: as for editing, plus channels they can view."""
        if user.is_anonymous:
            return queryset.none()
        if user.is_admin:
            return queryset

        return queryset.filter(
            Q(email__iexact=user.email)
            | Q(sender=user)
            | Q(channel__editors=user)
            | Q(channel__viewers=user)
        ).distinct()
class Change(models.Model):
    """Event-log row recording a single client- or server-generated change for syncing."""
    server_rev = models.BigAutoField(primary_key=True)
    # We need to store the user who is applying this change
    # so that we can validate they have permissions to do so
    # allow to be null so that we don't lose changes if a user
    # account is hard deleted.
    created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.SET_NULL, related_name="changes_by_user")
    # Almost all changes are related to channels, but some are specific only to users
    # so we allow this to be nullable for these edge cases.
    # Indexed by default because it's a ForeignKey field.
    channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=models.CASCADE)
    # For those changes related to users, store a user value instead of channel
    # this may be different to created_by, as changes to invitations affect individual users.
    # Indexed by default because it's a ForeignKey field.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE, related_name="changes_about_user")
    # Use client_rev to keep track of changes coming from the client side
    # but let it be blank or null for changes we generate on the server side
    client_rev = models.IntegerField(null=True, blank=True)
    # client_rev numbers are by session, we add the session key here for bookkeeping
    # to allow a check within the same session to return whether a change has been applied
    # or not, and hence remove it from the frontend
    session = models.ForeignKey(Session, null=True, blank=True, on_delete=models.SET_NULL)
    table = models.CharField(max_length=32)
    change_type = models.IntegerField()
    # Use the DRF JSONEncoder class as the encoder here
    # so that we can handle anything that has been deserialized by DRF
    # or that will be later be serialized by DRF
    kwargs = JSONField(encoder=JSONEncoder)
    applied = models.BooleanField(default=False)
    errored = models.BooleanField(default=False)

    @classmethod
    def _create_from_change(cls, created_by_id=None, channel_id=None, user_id=None, session_key=None, applied=False, table=None, rev=None, **data):
        """Build (without saving) a Change instance from a change dict, validating table and type."""
        # Pop with a default so a missing "type" key hits the intended TypeError
        # below rather than raising an opaque KeyError here.
        change_type = data.pop("type", None)
        if table is None or table not in ALL_TABLES:
            raise TypeError("table is a required argument for creating changes and must be a valid table name")
        if change_type is None or change_type not in ALL_CHANGES:
            raise TypeError("change_type is a required argument for creating changes and must be a valid change type integer")
        return cls(
            session_id=session_key,
            created_by_id=created_by_id,
            channel_id=channel_id,
            user_id=user_id,
            client_rev=rev,
            table=table,
            change_type=change_type,
            kwargs=data,
            applied=applied
        )

    @classmethod
    def create_changes(cls, changes, created_by_id=None, session_key=None, applied=False):
        """Bulk-create Change rows from an iterable of change dicts; returns the created models."""
        change_models = []
        for change in changes:
            change_models.append(cls._create_from_change(created_by_id=created_by_id, session_key=session_key, applied=applied, **change))

        cls.objects.bulk_create(change_models)
        return change_models

    @classmethod
    def create_change(cls, change, created_by_id=None, session_key=None, applied=False):
        """Create and save a single Change row from a change dict."""
        obj = cls._create_from_change(created_by_id=created_by_id, session_key=session_key, applied=applied, **change)
        obj.save()
        return obj

    @classmethod
    def serialize(cls, change):
        """Flatten a Change (model instance or values dict) back into a change dict."""
        datum = get_attribute(change, ["kwargs"]).copy()
        datum.update({
            "server_rev": get_attribute(change, ["server_rev"]),
            "table": get_attribute(change, ["table"]),
            "type": get_attribute(change, ["change_type"]),
            "channel_id": get_attribute(change, ["channel_id"]),
            "user_id": get_attribute(change, ["user_id"]),
            "created_by_id": get_attribute(change, ["created_by_id"])
        })
        return datum

    def serialize_to_change_dict(self):
        """Serialize this instance to a change dict."""
        return self.serialize(self)
class TaskResultCustom(object):
    """
    Custom fields to add to django_celery_results's TaskResult model.

    This class is never instantiated: its non-underscore attributes are
    copied onto the TaskResult model by :meth:`contribute_to_class` (invoked
    at import time below the class definition).

    If adding fields to this class, run `makemigrations` then move the generated migration from the
    `django_celery_results` app to the `contentcuration` app and override the constructor to change
    the app_label. See `0141_add_task_signature` for an example
    """
    # user shouldn't be null, but in order to append the field, this needs to be allowed
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="tasks", on_delete=models.CASCADE, null=True)
    # Channel this task operates on, if any. Plain UUID column (not a FK).
    channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)
    # Task completion percentage, constrained to 0-100 when validated.
    progress = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0), MaxValueValidator(100)])
    # a hash of the task name and kwargs for identifying repeat tasks
    signature = models.CharField(null=True, blank=False, max_length=32)
    # Keep a reference to the original as_dict so our override can extend it.
    # This attribute is also contributed to TaskResult, making it callable
    # there as self.super_as_dict().
    super_as_dict = TaskResult.as_dict

    def as_dict(self):
        """
        :return: A dictionary representation of the task result, extending the
            upstream TaskResult.as_dict output with the custom fields above.
        """
        super_dict = self.super_as_dict()
        super_dict.update(
            user_id=self.user_id,
            channel_id=self.channel_id,
            progress=self.progress,
        )
        return super_dict

    @classmethod
    def contribute_to_class(cls, model_class=TaskResult):
        """
        Adds fields to model, by default TaskResult

        Copies every public attribute of this class (fields and methods,
        excluding this method and Meta) onto model_class via Django's
        add_to_class, then appends our Meta.indexes to the model's _meta.

        :param model_class: TaskResult model
        """
        for field in dir(cls):
            # Skip dunder/private names, this bootstrap method itself, and the
            # Meta container (handled separately below).
            if not field.startswith("_") and field not in ('contribute_to_class', 'Meta'):
                model_class.add_to_class(field, getattr(cls, field))

        # manually add Meta afterwards
        # NOTE(review): this mutates model_class._meta.indexes directly rather
        # than going through add_to_class; presumably intentional since Meta is
        # not a field — confirm migrations pick the index up as expected.
        setattr(model_class._meta, 'indexes', getattr(model_class._meta, 'indexes', []) + cls.Meta.indexes)

    class Meta:
        indexes = [
            # add index that matches query usage for signature
            # (partial index: only rows whose task is still in an unready state)
            models.Index(
                fields=['signature'],
                name='task_result_signature_idx',
                condition=Q(status__in=celery_states.UNREADY_STATES),
            ),
        ]
# Trigger the class contributions at import time so TaskResult carries the
# extra fields/indexes before any task-result queries run.
TaskResultCustom.contribute_to_class()
|
normal
|
{
"blob_id": "32e904a39d03d3166369420b49db0b9b118110a3",
"index": 4179,
"step-1": "<mask token>\n\n\nclass ContentKind(models.Model):\n <mask token>\n\n def __str__(self):\n return self.kind\n\n\nclass FileFormat(models.Model):\n extension = models.CharField(primary_key=True, max_length=40, choices=\n file_formats.choices)\n mimetype = models.CharField(max_length=200, blank=True)\n\n def __str__(self):\n return self.extension\n\n\nclass FormatPreset(models.Model):\n id = models.CharField(primary_key=True, max_length=150, choices=\n format_presets.choices)\n readable_name = models.CharField(max_length=400)\n multi_language = models.BooleanField(default=False)\n supplementary = models.BooleanField(default=False)\n thumbnail = models.BooleanField(default=False)\n subtitle = models.BooleanField(default=False)\n display = models.BooleanField(default=True)\n order = models.IntegerField(default=0)\n kind = models.ForeignKey(ContentKind, related_name='format_presets',\n null=True, on_delete=models.SET_NULL)\n allowed_formats = models.ManyToManyField(FileFormat, blank=True)\n\n def __str__(self):\n return self.id\n\n @classmethod\n def guess_format_preset(cls, filename):\n \"\"\"\n Guess the format preset of a filename based on its extension.\n\n Return None if format is unknown.\n \"\"\"\n _, ext = os.path.splitext(filename)\n ext = ext.lstrip('.')\n f = FormatPreset.objects.filter(allowed_formats__extension=ext,\n display=True)\n return f.first()\n\n @classmethod\n def get_preset(cls, preset_name):\n \"\"\"\n Get the FormatPreset object with that exact name.\n\n Returns None if that format preset is not found.\n \"\"\"\n try:\n return FormatPreset.objects.get(id=preset_name)\n except FormatPreset.DoesNotExist:\n return None\n\n\nclass Language(models.Model):\n id = models.CharField(max_length=14, primary_key=True)\n lang_code = models.CharField(max_length=3, db_index=True)\n lang_subcode = models.CharField(max_length=10, db_index=True, blank=\n True, null=True)\n readable_name = models.CharField(max_length=100, blank=True)\n native_name = 
models.CharField(max_length=100, blank=True)\n lang_direction = models.CharField(max_length=3, choices=languages.\n LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])\n\n def ietf_name(self):\n return '{code}-{subcode}'.format(code=self.lang_code, subcode=self.\n lang_subcode) if self.lang_subcode else self.lang_code\n\n def __str__(self):\n return self.ietf_name()\n\n\n<mask token>\n\n\nclass AssessmentItem(models.Model):\n type = models.CharField(max_length=50, default='multiplechoice')\n question = models.TextField(blank=True)\n hints = models.TextField(default='[]')\n answers = models.TextField(default='[]')\n order = models.IntegerField(default=1)\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'assessment_items', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n assessment_id = UUIDField(primary_key=False, default=uuid.uuid4,\n editable=False)\n raw_data = models.TextField(blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n randomize = models.BooleanField(default=False)\n deleted = models.BooleanField(default=False)\n objects = CustomManager()\n _field_updates = FieldTracker()\n\n def has_changes(self):\n return bool(self._field_updates.changed())\n\n\n class Meta:\n indexes = [models.Index(fields=['assessment_id'], name=\n ASSESSMENT_ID_INDEX_NAME)]\n unique_together = ['contentnode', 'assessment_id']\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = 
queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('contentnode__tree_id'\n )).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n def on_create(self):\n \"\"\"\n When an exercise is added to a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n \"\"\"\n When an exercise is updated of a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n When an exercise is deleted from a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n return super(AssessmentItem, self).delete(*args, **kwargs)\n\n\nclass SlideshowSlide(models.Model):\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'slideshow_slides', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n sort_order = models.FloatField(default=1.0)\n metadata = JSONField(default=dict)\n\n\nclass StagedFile(models.Model):\n \"\"\"\n Keeps track of files uploaded through Ricecooker to avoid user going over disk quota limit\n \"\"\"\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='staged_files',\n blank=True, null=True, on_delete=models.CASCADE)\n\n\n<mask token>\n\n\nclass 
File(models.Model):\n \"\"\"\n The bottom layer of the contentDB schema, defines the basic building brick for content.\n Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n file_on_disk = models.FileField(upload_to=object_storage_name, storage=\n default_storage, max_length=500, blank=True)\n contentnode = models.ForeignKey(ContentNode, related_name='files',\n blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n assessment_item = models.ForeignKey(AssessmentItem, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n slideshow_slide = models.ForeignKey(SlideshowSlide, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n file_format = models.ForeignKey(FileFormat, related_name='files', blank\n =True, null=True, db_index=True, on_delete=models.SET_NULL)\n preset = models.ForeignKey(FormatPreset, related_name='files', blank=\n True, null=True, db_index=True, on_delete=models.SET_NULL)\n language = models.ForeignKey(Language, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n original_filename = models.CharField(max_length=255, blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n modified = models.DateTimeField(auto_now=True, verbose_name='modified',\n null=True)\n duration = models.IntegerField(blank=True, null=True)\n objects = CustomManager()\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id')) | Q(\n tree_id=OuterRef('assessment_item__contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and 
user.id\n if not user_id:\n return queryset.none()\n cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls.\n _permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(edit=True) | Q(uploaded_by=user,\n contentnode__isnull=True, assessment_item__isnull=True))\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True).filter(Q(main_tree__tree_id=OuterRef(\n 'contentnode__tree_id')) | Q(main_tree__tree_id=OuterRef(\n 'assessment_item__contentnode__tree_id'))).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True) |\n Q(uploaded_by=user, contentnode__isnull=True,\n assessment_item__isnull=True))\n\n\n class Admin:\n pass\n\n def __str__(self):\n return '{checksum}{extension}'.format(checksum=self.checksum,\n extension='.' + self.file_format.extension)\n\n def filename(self):\n \"\"\"\n Returns just the filename of the File in storage, without the path\n\n e.g. 
abcd.mp4\n \"\"\"\n return os.path.basename(self.file_on_disk.name)\n\n def update_contentnode_content_id(self):\n \"\"\"\n If the file is attached to a contentnode and is not a thumbnail\n then update that contentnode's content_id if it's a copied contentnode.\n \"\"\"\n if self.contentnode and self.preset.thumbnail is False:\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n self.modified = timezone.now()\n self.update_contentnode_content_id()\n\n def save(self, set_by_file_on_disk=True, *args, **kwargs):\n \"\"\"\n Overrider the default save method.\n If the file_on_disk FileField gets passed a content copy:\n 1. generate the MD5 from the content copy\n 2. fill the other fields accordingly\n \"\"\"\n from contentcuration.utils.user import calculate_user_storage\n if self.file_format_id:\n if self.file_format_id not in dict(file_formats.choices):\n raise ValidationError('Invalid file_format')\n if set_by_file_on_disk and self.file_on_disk:\n if self.checksum is None or self.checksum == '':\n md5 = hashlib.md5()\n for chunk in self.file_on_disk.chunks():\n md5.update(chunk)\n self.checksum = md5.hexdigest()\n if not self.file_size:\n self.file_size = self.file_on_disk.size\n if not self.file_format_id:\n ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')\n if ext in list(dict(file_formats.choices).keys()):\n self.file_format_id = ext\n else:\n raise ValueError('Files of type `{}` are not supported.'\n .format(ext))\n super(File, self).save(*args, **kwargs)\n if self.uploaded_by_id:\n calculate_user_storage(self.uploaded_by_id)\n\n\n class Meta:\n indexes = [models.Index(fields=['checksum', 'file_size'], name=\n FILE_DISTINCT_INDEX_NAME), models.Index(fields=['-modified'],\n name=FILE_MODIFIED_DESC_INDEX_NAME)]\n constraints = [models.CheckConstraint(check=Q(preset__in=\n MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True), name\n =FILE_DURATION_CONSTRAINT)]\n\n\n<mask token>\n\n\nclass 
PrerequisiteContentRelationship(models.Model):\n \"\"\"\n Predefine the prerequisite relationship between two ContentNode objects.\n \"\"\"\n target_node = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)\n prerequisite = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['target_node', 'prerequisite']\n\n def clean(self, *args, **kwargs):\n if self.target_node == self.prerequisite:\n raise IntegrityError('Cannot self reference as prerequisite.')\n if PrerequisiteContentRelationship.objects.using(self._state.db\n ).filter(target_node=self.prerequisite, prerequisite=self.\n target_node):\n raise IntegrityError(\n 'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'\n % (self.target_node, self.prerequisite))\n super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n super(PrerequisiteContentRelationship, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n\nclass RelatedContentRelationship(models.Model):\n \"\"\"\n Predefine the related relationship between two ContentNode objects.\n \"\"\"\n contentnode_1 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_1', on_delete=models.CASCADE)\n contentnode_2 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_2', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['contentnode_1', 'contentnode_2']\n\n def save(self, *args, **kwargs):\n if self.contentnode_1 == self.contentnode_2:\n raise IntegrityError('Cannot self reference as related.')\n if RelatedContentRelationship.objects.using(self._state.db).filter(\n contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1\n ):\n return\n super(RelatedContentRelationship, self).save(*args, **kwargs)\n\n\nclass 
Invitation(models.Model):\n \"\"\" Invitation to edit channel \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n accepted = models.BooleanField(default=False)\n declined = models.BooleanField(default=False)\n revoked = models.BooleanField(default=False)\n invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n SET_NULL, null=True, related_name='sent_to')\n share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)\n email = models.EmailField(max_length=100, null=True)\n sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\n 'sent_by', null=True, on_delete=models.CASCADE)\n channel = models.ForeignKey('Channel', null=True, related_name=\n 'pending_editors', on_delete=models.CASCADE)\n first_name = models.CharField(max_length=100, blank=True)\n last_name = models.CharField(max_length=100, blank=True, null=True)\n\n\n class Meta:\n verbose_name = 'Invitation'\n verbose_name_plural = 'Invitations'\n\n def accept(self):\n user = User.objects.filter(email__iexact=self.email).first()\n if self.channel:\n if self.share_mode == VIEW_ACCESS:\n self.channel.editors.remove(user)\n self.channel.viewers.add(user)\n else:\n self.channel.viewers.remove(user)\n self.channel.editors.add(user)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user)).distinct()\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user) | Q(channel__viewers=user)).distinct()\n\n\nclass Change(models.Model):\n server_rev = models.BigAutoField(primary_key=True)\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,\n blank=True, on_delete=models.SET_NULL, 
related_name='changes_by_user')\n channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=\n models.CASCADE)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=\n True, on_delete=models.CASCADE, related_name='changes_about_user')\n client_rev = models.IntegerField(null=True, blank=True)\n session = models.ForeignKey(Session, null=True, blank=True, on_delete=\n models.SET_NULL)\n table = models.CharField(max_length=32)\n change_type = models.IntegerField()\n kwargs = JSONField(encoder=JSONEncoder)\n applied = models.BooleanField(default=False)\n errored = models.BooleanField(default=False)\n\n @classmethod\n def _create_from_change(cls, created_by_id=None, channel_id=None,\n user_id=None, session_key=None, applied=False, table=None, rev=None,\n **data):\n change_type = data.pop('type')\n if table is None or table not in ALL_TABLES:\n raise TypeError(\n 'table is a required argument for creating changes and must be a valid table name'\n )\n if change_type is None or change_type not in ALL_CHANGES:\n raise TypeError(\n 'change_type is a required argument for creating changes and must be a valid change type integer'\n )\n return cls(session_id=session_key, created_by_id=created_by_id,\n channel_id=channel_id, user_id=user_id, client_rev=rev, table=\n table, change_type=change_type, kwargs=data, applied=applied)\n\n @classmethod\n def create_changes(cls, changes, created_by_id=None, session_key=None,\n applied=False):\n change_models = []\n for change in changes:\n change_models.append(cls._create_from_change(created_by_id=\n created_by_id, session_key=session_key, applied=applied, **\n change))\n cls.objects.bulk_create(change_models)\n return change_models\n\n @classmethod\n def create_change(cls, change, created_by_id=None, session_key=None,\n applied=False):\n obj = cls._create_from_change(created_by_id=created_by_id,\n session_key=session_key, applied=applied, **change)\n obj.save()\n return obj\n\n @classmethod\n def serialize(cls, 
change):\n datum = get_attribute(change, ['kwargs']).copy()\n datum.update({'server_rev': get_attribute(change, ['server_rev']),\n 'table': get_attribute(change, ['table']), 'type':\n get_attribute(change, ['change_type']), 'channel_id':\n get_attribute(change, ['channel_id']), 'user_id': get_attribute\n (change, ['user_id']), 'created_by_id': get_attribute(change, [\n 'created_by_id'])})\n return datum\n\n def serialize_to_change_dict(self):\n return self.serialize(self)\n\n\nclass TaskResultCustom(object):\n \"\"\"\n Custom fields to add to django_celery_results's TaskResult model\n\n If adding fields to this class, run `makemigrations` then move the generated migration from the\n `django_celery_results` app to the `contentcuration` app and override the constructor to change\n the app_label. See `0141_add_task_signature` for an example\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='tasks',\n on_delete=models.CASCADE, null=True)\n channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)\n progress = models.IntegerField(null=True, blank=True, validators=[\n MinValueValidator(0), MaxValueValidator(100)])\n signature = models.CharField(null=True, blank=False, max_length=32)\n super_as_dict = TaskResult.as_dict\n\n def as_dict(self):\n \"\"\"\n :return: A dictionary representation\n \"\"\"\n super_dict = self.super_as_dict()\n super_dict.update(user_id=self.user_id, channel_id=self.channel_id,\n progress=self.progress)\n return super_dict\n\n @classmethod\n def contribute_to_class(cls, model_class=TaskResult):\n \"\"\"\n Adds fields to model, by default TaskResult\n :param model_class: TaskResult model\n \"\"\"\n for field in dir(cls):\n if not field.startswith('_') and field not in (\n 'contribute_to_class', 'Meta'):\n model_class.add_to_class(field, getattr(cls, field))\n setattr(model_class._meta, 'indexes', getattr(model_class._meta,\n 'indexes', []) + cls.Meta.indexes)\n\n\n class Meta:\n indexes = 
[models.Index(fields=['signature'], name=\n 'task_result_signature_idx', condition=Q(status__in=\n celery_states.UNREADY_STATES))]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass License(models.Model):\n <mask token>\n license_name = models.CharField(max_length=50)\n license_url = models.URLField(blank=True)\n license_description = models.TextField(blank=True)\n copyright_holder_required = models.BooleanField(default=True)\n is_custom = models.BooleanField(default=False)\n exists = models.BooleanField(default=False, verbose_name=\n 'license exists', help_text=\n 'Tells whether or not a content item is licensed to share')\n\n @classmethod\n def validate_name(cls, name):\n if cls.objects.filter(license_name=name).count() == 0:\n raise ValidationError('License `{}` does not exist'.format(name))\n\n def __str__(self):\n return self.license_name\n\n\n<mask token>\n\n\nclass ContentNode(MPTTModel, models.Model):\n \"\"\"\n By default, all nodes have a title and can be used as a topic.\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=\n False, db_index=True)\n node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)\n original_channel_id = UUIDField(primary_key=False, editable=False, null\n =True, db_index=True)\n source_channel_id = UUIDField(primary_key=False, editable=False, null=True)\n original_source_node_id = UUIDField(primary_key=False, editable=False,\n null=True, db_index=True)\n source_node_id = UUIDField(primary_key=False, editable=False, null=True)\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n title = models.CharField(max_length=200, blank=True)\n description = models.TextField(blank=True)\n kind = models.ForeignKey('ContentKind', related_name='contentnodes',\n db_index=True, null=True, blank=True, on_delete=models.SET_NULL)\n license = models.ForeignKey('License', null=True, blank=True, on_delete\n =models.SET_NULL)\n license_description = models.CharField(max_length=400, null=True, blank\n 
=True)\n prerequisite = models.ManyToManyField('self', related_name=\n 'is_prerequisite_of', through='PrerequisiteContentRelationship',\n symmetrical=False, blank=True)\n is_related = models.ManyToManyField('self', related_name='relate_to',\n through='RelatedContentRelationship', symmetrical=False, blank=True)\n language = models.ForeignKey('Language', null=True, blank=True,\n related_name='content_language', on_delete=models.SET_NULL)\n parent = TreeForeignKey('self', null=True, blank=True, related_name=\n 'children', db_index=True, on_delete=models.CASCADE)\n tags = models.ManyToManyField(ContentTag, symmetrical=False,\n related_name='tagged_content', blank=True)\n sort_order = models.FloatField(max_length=50, default=1, verbose_name=\n 'sort order', help_text='Ascending, lowest number shown first')\n copyright_holder = models.CharField(max_length=200, null=True, blank=\n True, default='', help_text=\n 'Organization of person who holds the essential rights')\n original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=\n True, blank=True, related_name='duplicates')\n cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=\n True, blank=True, related_name='clones')\n thumbnail_encoding = models.TextField(blank=True, null=True)\n created = models.DateTimeField(default=timezone.now, verbose_name='created'\n )\n modified = models.DateTimeField(auto_now=True, verbose_name='modified')\n published = models.BooleanField(default=False)\n publishing = models.BooleanField(default=False)\n complete = models.BooleanField(null=True)\n changed = models.BooleanField(default=True)\n \"\"\"\n Extra fields for exercises:\n - type: mastery model to use to determine completion\n - m: m value for M out of N mastery criteria\n - n: n value for M out of N mastery criteria\n \"\"\"\n extra_fields = JSONField(default=dict, blank=True, null=True)\n author = models.CharField(max_length=200, blank=True, default='',\n help_text='Who created this content?', 
null=True)\n aggregator = models.CharField(max_length=200, blank=True, default='',\n help_text='Who gathered this content together?', null=True)\n provider = models.CharField(max_length=200, blank=True, default='',\n help_text='Who distributed this content?', null=True)\n role_visibility = models.CharField(max_length=50, choices=roles.choices,\n default=roles.LEARNER)\n freeze_authoring_data = models.BooleanField(default=False)\n grade_levels = models.JSONField(blank=True, null=True)\n resource_types = models.JSONField(blank=True, null=True)\n learning_activities = models.JSONField(blank=True, null=True)\n accessibility_labels = models.JSONField(blank=True, null=True)\n categories = models.JSONField(blank=True, null=True)\n learner_needs = models.JSONField(blank=True, null=True)\n suggested_duration = models.IntegerField(blank=True, null=True,\n help_text='Suggested duration for the content node (in seconds)')\n objects = CustomContentNodeTreeManager()\n _field_updates = FieldTracker()\n _permission_filter = Q(tree_id=OuterRef('tree_id'))\n\n @classmethod\n def _annotate_channel_id(cls, queryset):\n return queryset.annotate(channel_id=Subquery(Channel.objects.filter\n (main_tree__tree_id=OuterRef('tree_id')).values_list('id', flat\n =True)[:1]))\n\n @classmethod\n def filter_by_pk(cls, pk):\n \"\"\"\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always\n returns a queryset filtered by pk.\n\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode\n for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. 
If\n a ContentNode does not exist for `pk` then an empty queryset is returned.\n \"\"\"\n query = ContentNode.objects.filter(pk=pk)\n if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:\n tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))\n if tree_id:\n query = query.filter(tree_id=tree_id)\n else:\n tree_id = ContentNode.objects.filter(pk=pk).values_list(\n 'tree_id', flat=True).first()\n if tree_id:\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk),\n tree_id, None)\n query = query.filter(tree_id=tree_id)\n else:\n query = query.none()\n return query\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('tree_id')).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n @raise_if_unsaved\n def get_root(self):\n if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n return self\n return super(ContentNode, self).get_root()\n\n @raise_if_unsaved\n def get_root_id(self):\n if self.is_root_node() and self.kind_id != 
content_kinds.TOPIC:\n return self\n return ContentNode.objects.values_list('pk', flat=True).get(tree_id\n =self._mpttfield('tree_id'), parent=None)\n\n def get_tree_data(self, levels=float('inf')):\n \"\"\"\n Returns `levels`-deep tree information starting at current node.\n Args:\n levels (int): depth of tree hierarchy to return\n Returns:\n tree (dict): starting with self, with children list containing either\n the just the children's `node_id`s or full recusive tree.\n \"\"\"\n if self.kind_id == content_kinds.TOPIC:\n node_data = {'title': self.title, 'kind': self.kind_id,\n 'node_id': self.node_id, 'studio_id': self.id}\n children = self.children.all()\n if levels > 0:\n node_data['children'] = [c.get_tree_data(levels=levels - 1) for\n c in children]\n return node_data\n if self.kind_id == content_kinds.EXERCISE:\n return {'title': self.title, 'kind': self.kind_id, 'count':\n self.assessment_items.count(), 'node_id': self.node_id,\n 'studio_id': self.id}\n return {'title': self.title, 'kind': self.kind_id, 'file_size':\n self.files.values('file_size').aggregate(size=Sum('file_size'))\n ['size'], 'node_id': self.node_id, 'studio_id': self.id}\n\n def get_original_node(self):\n original_node = self.original_node or self\n if self.original_channel_id and self.original_source_node_id:\n original_tree_id = Channel.objects.select_related('main_tree').get(\n pk=self.original_channel_id).main_tree.tree_id\n original_node = ContentNode.objects.filter(tree_id=\n original_tree_id, node_id=self.original_source_node_id).first(\n ) or ContentNode.objects.filter(tree_id=original_tree_id,\n content_id=self.content_id).first() or self\n return original_node\n\n def get_associated_presets(self):\n key = 'associated_presets_{}'.format(self.kind_id)\n cached_data = cache.get(key)\n if cached_data:\n return cached_data\n presets = list(FormatPreset.objects.filter(kind=self.kind).values())\n cache.set(key, presets, None)\n return presets\n\n def get_prerequisites(self):\n 
prerequisite_mapping = {}\n prerequisites = self.prerequisite.all()\n prereqlist = list(prerequisites)\n for prereq in prerequisites:\n prlist, prereqmapping = prereq.get_prerequisites()\n prerequisite_mapping.update({prereq.pk: prereqmapping})\n prereqlist.extend(prlist)\n return prereqlist, prerequisite_mapping\n\n def get_postrequisites(self):\n postrequisite_mapping = {}\n postrequisites = self.is_prerequisite_of.all()\n postreqlist = list(postrequisites)\n for postreq in postrequisites:\n prlist, postreqmapping = postreq.get_postrequisites()\n postrequisite_mapping.update({postreq.pk: postreqmapping})\n postreqlist.extend(prlist)\n return postreqlist, postrequisite_mapping\n\n def get_channel_id(self):\n if hasattr(self, 'channel_id'):\n return self.channel_id\n channel = self.get_channel()\n if channel:\n return channel.id\n return None\n\n def get_channel(self):\n try:\n root = self.get_root()\n if not root:\n return None\n return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=\n root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(\n previous_tree=root)).first()\n except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):\n return None\n\n def get_thumbnail(self):\n if self.thumbnail_encoding:\n thumbnail_data = load_json_string(self.thumbnail_encoding)\n if type(thumbnail_data) is dict and thumbnail_data.get('base64'):\n return thumbnail_data['base64']\n thumbnail = self.files.filter(preset__thumbnail=True).first()\n if thumbnail:\n return generate_storage_url(str(thumbnail))\n return ''\n\n @classmethod\n def get_nodes_with_title(cls, title, limit_to_children_of=None):\n \"\"\"\n Returns all ContentNodes with a given title. 
If limit_to_children_of\n is passed in with an id, only look at all the children of the node with that id.\n \"\"\"\n if limit_to_children_of:\n root = cls.objects.get(id=limit_to_children_of)\n return root.get_descendants().filter(title=title)\n return cls.objects.filter(title=title)\n\n def get_details(self, channel_id=None):\n \"\"\"\n Returns information about the node and its children, including total size, languages, files, etc.\n\n :return: A dictionary with detailed statistics and information about the node.\n \"\"\"\n from contentcuration.viewsets.common import SQArrayAgg\n from contentcuration.viewsets.common import SQCount\n from contentcuration.viewsets.common import SQRelatedArrayAgg\n from contentcuration.viewsets.common import SQSum\n from contentcuration.viewsets.common import SQJSONBKeyArrayAgg\n node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id\n ).order_by()\n descendants = self.get_descendants().values('id')\n if channel_id:\n channel = Channel.objects.filter(id=channel_id)[0]\n else:\n channel = self.get_channel()\n if not descendants.exists():\n data = {'last_update': pytz.utc.localize(datetime.now()).\n strftime(settings.DATE_TIME_FORMAT), 'created': self.\n created.strftime(settings.DATE_TIME_FORMAT),\n 'resource_count': 0, 'resource_size': 0, 'includes': {\n 'coach_content': 0, 'exercises': 0}, 'kind_count': [],\n 'languages': [], 'accessible_languages': [], 'licenses': [],\n 'tags': [], 'copyright_holders': [], 'authors': [],\n 'aggregators': [], 'providers': [], 'sample_pathway': [],\n 'original_channels': [], 'sample_nodes': [], 'levels': [],\n 'categories': []}\n cache.set('details_{}'.format(self.node_id), json.dumps(data), None\n )\n return data\n resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()\n nodes = With(File.objects.filter(contentnode_id__in=Subquery(\n resources.values('id'))).values('checksum', 'file_size').\n order_by(), name='nodes')\n file_query = 
nodes.queryset().with_cte(nodes).values('checksum',\n 'file_size').distinct()\n l_nodes = With(File.objects.filter(contentnode_id__in=Subquery(\n resources.values('id'))).values('language_id', 'preset_id').\n order_by(), name='l_nodes')\n accessible_languages_query = l_nodes.queryset().filter(preset_id=\n format_presets.VIDEO_SUBTITLE).with_cte(l_nodes).values(\n 'language__native_name').distinct()\n tags_query = str(ContentTag.objects.filter(tagged_content__pk__in=\n descendants.values_list('pk', flat=True)).values('tag_name').\n annotate(count=Count('tag_name')).query).replace('topic', \"'topic'\"\n )\n kind_count_query = str(resources.values('kind_id').annotate(count=\n Count('kind_id')).query).replace('topic', \"'topic'\")\n node = node.annotate(resource_count=SQCount(resources, field='id'),\n resource_size=SQSum(file_query, field='file_size'),\n copyright_holders=SQArrayAgg(resources.distinct(\n 'copyright_holder').order_by('copyright_holder'), field=\n 'copyright_holder'), authors=SQArrayAgg(resources.distinct(\n 'author').order_by('author'), field='author'), aggregators=\n SQArrayAgg(resources.distinct('aggregator').order_by(\n 'aggregator'), field='aggregator'), providers=SQArrayAgg(\n resources.distinct('provider').order_by('provider'), field=\n 'provider'), languages=SQRelatedArrayAgg(descendants.exclude(\n language=None).distinct('language__native_name').order_by(),\n field='language__native_name', fieldname='native_name'),\n accessible_languages=SQRelatedArrayAgg(\n accessible_languages_query, field='language__native_name',\n fieldname='native_name'), licenses=SQRelatedArrayAgg(resources.\n exclude(license=None).distinct('license__license_name').\n order_by('license__license_name'), field=\n 'license__license_name', fieldname='license_name'), kind_count=\n RawSQL('SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format\n (kind_count_query), ()), tags_list=RawSQL(\n 'SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format(\n tags_query), ()), 
coach_content=SQCount(resources.filter(\n role_visibility=roles.COACH), field='id'), exercises=SQCount(\n resources.filter(kind_id=content_kinds.EXERCISE), field='id'),\n levels=SQJSONBKeyArrayAgg(descendants.exclude(\n grade_levels__isnull=True), field='grade_levels'),\n all_categories=SQJSONBKeyArrayAgg(descendants.exclude(\n categories__isnull=True), field='categories'))\n max_level = max(resources.values_list('level', flat=True).order_by(\n ).distinct() or [0])\n m_nodes = With(resources.values('id', 'level', 'tree_id', 'lft').\n order_by(), name='m_nodes')\n deepest_node_record = m_nodes.queryset().with_cte(m_nodes).filter(level\n =max_level).values('id').order_by('tree_id', 'lft').first()\n if deepest_node_record:\n deepest_node = ContentNode.objects.get(pk=deepest_node_record['id']\n )\n pathway = list(deepest_node.get_ancestors().order_by().exclude(\n parent=None).values('title', 'node_id', 'kind_id').order_by()\n ) if deepest_node_record else []\n sample_nodes = [{'node_id': n.node_id, 'title': n.title,\n 'description': n.description, 'thumbnail': n.get_thumbnail(),\n 'kind': n.kind_id} for n in deepest_node.get_siblings(\n include_self=True)[0:4]] if deepest_node_record else []\n channel_id = channel and channel.id\n originals = resources.values('original_channel_id').annotate(count=\n Count('original_channel_id')).order_by('original_channel_id')\n originals = {c['original_channel_id']: c['count'] for c in originals}\n original_channels = Channel.objects.exclude(pk=channel_id).filter(\n pk__in=originals.keys(), deleted=False).order_by()\n original_channels = [{'id': c.id, 'name': '{}{}'.format(c.name, _(\n ' (Original)') if channel_id == c.id else ''), 'thumbnail': c.\n get_thumbnail(), 'count': originals[c.id]} for c in\n original_channels]\n node = node.order_by().values('id', 'resource_count',\n 'resource_size', 'copyright_holders', 'authors', 'aggregators',\n 'providers', 'languages', 'accessible_languages',\n 'coach_content', 'licenses', 'tags_list', 
'kind_count',\n 'exercises', 'levels', 'all_categories').first()\n for_educators = {'coach_content': node['coach_content'],\n 'exercises': node['exercises']}\n data = {'last_update': pytz.utc.localize(datetime.now()).strftime(\n settings.DATE_TIME_FORMAT), 'created': self.created.strftime(\n settings.DATE_TIME_FORMAT), 'resource_count': node.get(\n 'resource_count', 0), 'resource_size': node.get('resource_size',\n 0), 'includes': for_educators, 'kind_count': node.get(\n 'kind_count') or [], 'languages': node.get('languages') or [],\n 'accessible_languages': node.get('accessible_languages') or [],\n 'licenses': node.get('licenses') or [], 'tags': node.get(\n 'tags_list') or [], 'original_channels': original_channels,\n 'sample_pathway': pathway, 'sample_nodes': sample_nodes,\n 'authors': list(filter(bool, node['authors'])), 'aggregators':\n list(filter(bool, node['aggregators'])), 'providers': list(\n filter(bool, node['providers'])), 'copyright_holders': list(\n filter(bool, node['copyright_holders'])), 'levels': node.get(\n 'levels') or [], 'categories': node.get('all_categories') or []}\n cache.set('details_{}'.format(self.node_id), json.dumps(data), None)\n return data\n\n def has_changes(self):\n mptt_opts = self._mptt_meta\n blacklist = set(['changed', 'modified', 'publishing', mptt_opts.\n tree_id_attr, mptt_opts.left_attr, mptt_opts.right_attr,\n mptt_opts.level_attr])\n original_values = self._field_updates.changed()\n return any(True for field in original_values if field not in blacklist)\n\n def recalculate_editors_storage(self):\n from contentcuration.utils.user import calculate_user_storage\n for editor in self.files.values_list('uploaded_by_id', flat=True\n ).distinct():\n calculate_user_storage(editor)\n\n def mark_complete(self):\n errors = []\n if not (bool(self.title) or self.parent_id is None):\n errors.append('Empty title')\n if self.kind_id != content_kinds.TOPIC:\n if not self.license:\n errors.append('Missing license')\n if (self.license and 
self.license.is_custom and not self.\n license_description):\n errors.append('Missing license description for custom license')\n if (self.license and self.license.copyright_holder_required and\n not self.copyright_holder):\n errors.append('Missing required copyright holder')\n if (self.kind_id != content_kinds.EXERCISE and not self.files.\n filter(preset__supplementary=False).exists()):\n errors.append('Missing default file')\n if self.kind_id == content_kinds.EXERCISE:\n if not self.assessment_items.filter(~Q(raw_data='') | ~Q(\n question='') & ~Q(answers='[]') & (Q(type=exercises.\n INPUT_QUESTION) | Q(answers__iregex='\"correct\":\\\\s*true'))\n ).exists():\n errors.append(\n 'No questions with question text and complete answers')\n criterion = self.extra_fields.get('options', {}).get(\n 'completion_criteria')\n if not (self.extra_fields.get('mastery_model') or criterion):\n errors.append('Missing mastery criterion')\n if criterion:\n try:\n completion_criteria.validate(criterion, kind=\n content_kinds.EXERCISE)\n except completion_criteria.ValidationError:\n errors.append(\n 'Mastery criterion is defined but is invalid')\n self.complete = not errors\n return errors\n\n def make_content_id_unique(self):\n \"\"\"\n If self is NOT an original contentnode (in other words, a copied contentnode)\n and a contentnode with same content_id exists then we update self's content_id.\n \"\"\"\n is_node_original = (self.original_source_node_id is None or self.\n original_source_node_id == self.node_id)\n node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(\n content_id=self.content_id)\n if not is_node_original and node_same_content_id.exists():\n ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.\n uuid4().hex)\n\n def on_create(self):\n self.changed = True\n self.recalculate_editors_storage()\n self.set_default_learning_activity()\n\n def on_update(self):\n self.changed = self.changed or self.has_changes()\n\n def move_to(self, target, *args, 
**kwargs):\n parent_was_trashtree = self.parent.channel_trash.exists()\n super(ContentNode, self).move_to(target, *args, **kwargs)\n self.save()\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.\n tree_id, None)\n if target.channel_trash.exists() or parent_was_trashtree:\n self.recalculate_editors_storage()\n\n def set_default_learning_activity(self):\n if self.learning_activities is None:\n if self.kind in kind_activity_map:\n self.learning_activities = {kind_activity_map[self.kind]: True}\n\n def save(self, skip_lock=False, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n old_parent_id = self._field_updates.changed().get('parent_id')\n if self._state.adding and (self.parent_id or self.parent):\n same_order = False\n elif old_parent_id is DeferredAttribute:\n same_order = True\n else:\n same_order = old_parent_id == self.parent_id\n if not same_order:\n changed_ids = list(filter(lambda x: x is not None, set([\n old_parent_id, self.parent_id])))\n else:\n changed_ids = []\n if not same_order and not skip_lock:\n with ContentNode.objects.lock_mptt(*ContentNode.objects.filter(\n id__in=[pid for pid in [old_parent_id, self.parent_id] if\n pid]).values_list('tree_id', flat=True).distinct()):\n super(ContentNode, self).save(*args, **kwargs)\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(\n changed=True)\n else:\n super(ContentNode, self).save(*args, **kwargs)\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(changed\n =True)\n save.alters_data = True\n\n def delete(self, *args, **kwargs):\n parent = self.parent or self._field_updates.changed().get('parent')\n if parent:\n parent.changed = True\n parent.save()\n self.recalculate_editors_storage()\n with ContentNode.objects.lock_mptt(self.tree_id):\n return super(ContentNode, self).delete(*args, **kwargs)\n delete.alters_data = True\n\n def copy_to(self, target=None, position='last-child', pk=None, mods=\n None, 
excluded_descendants=None, can_edit_source_channel=None,\n batch_size=None, progress_tracker=None):\n return self._tree_manager.copy_node(self, target, position, pk,\n mods, excluded_descendants, can_edit_source_channel, batch_size,\n progress_tracker)[0]\n\n def copy(self):\n return self.copy_to()\n\n def is_publishable(self):\n return self.complete and self.get_descendants(include_self=True\n ).exclude(kind_id=content_kinds.TOPIC).exists()\n\n\n class Meta:\n verbose_name = 'Topic'\n verbose_name_plural = 'Topics'\n indexes = [models.Index(fields=['node_id'], name=NODE_ID_INDEX_NAME\n ), models.Index(fields=['-modified'], name=\n NODE_MODIFIED_DESC_INDEX_NAME)]\n\n\nclass ContentKind(models.Model):\n kind = models.CharField(primary_key=True, max_length=200, choices=\n content_kinds.choices)\n\n def __str__(self):\n return self.kind\n\n\nclass FileFormat(models.Model):\n extension = models.CharField(primary_key=True, max_length=40, choices=\n file_formats.choices)\n mimetype = models.CharField(max_length=200, blank=True)\n\n def __str__(self):\n return self.extension\n\n\nclass FormatPreset(models.Model):\n id = models.CharField(primary_key=True, max_length=150, choices=\n format_presets.choices)\n readable_name = models.CharField(max_length=400)\n multi_language = models.BooleanField(default=False)\n supplementary = models.BooleanField(default=False)\n thumbnail = models.BooleanField(default=False)\n subtitle = models.BooleanField(default=False)\n display = models.BooleanField(default=True)\n order = models.IntegerField(default=0)\n kind = models.ForeignKey(ContentKind, related_name='format_presets',\n null=True, on_delete=models.SET_NULL)\n allowed_formats = models.ManyToManyField(FileFormat, blank=True)\n\n def __str__(self):\n return self.id\n\n @classmethod\n def guess_format_preset(cls, filename):\n \"\"\"\n Guess the format preset of a filename based on its extension.\n\n Return None if format is unknown.\n \"\"\"\n _, ext = os.path.splitext(filename)\n ext 
= ext.lstrip('.')\n f = FormatPreset.objects.filter(allowed_formats__extension=ext,\n display=True)\n return f.first()\n\n @classmethod\n def get_preset(cls, preset_name):\n \"\"\"\n Get the FormatPreset object with that exact name.\n\n Returns None if that format preset is not found.\n \"\"\"\n try:\n return FormatPreset.objects.get(id=preset_name)\n except FormatPreset.DoesNotExist:\n return None\n\n\nclass Language(models.Model):\n id = models.CharField(max_length=14, primary_key=True)\n lang_code = models.CharField(max_length=3, db_index=True)\n lang_subcode = models.CharField(max_length=10, db_index=True, blank=\n True, null=True)\n readable_name = models.CharField(max_length=100, blank=True)\n native_name = models.CharField(max_length=100, blank=True)\n lang_direction = models.CharField(max_length=3, choices=languages.\n LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])\n\n def ietf_name(self):\n return '{code}-{subcode}'.format(code=self.lang_code, subcode=self.\n lang_subcode) if self.lang_subcode else self.lang_code\n\n def __str__(self):\n return self.ietf_name()\n\n\n<mask token>\n\n\nclass AssessmentItem(models.Model):\n type = models.CharField(max_length=50, default='multiplechoice')\n question = models.TextField(blank=True)\n hints = models.TextField(default='[]')\n answers = models.TextField(default='[]')\n order = models.IntegerField(default=1)\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'assessment_items', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n assessment_id = UUIDField(primary_key=False, default=uuid.uuid4,\n editable=False)\n raw_data = models.TextField(blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n randomize = models.BooleanField(default=False)\n deleted = models.BooleanField(default=False)\n objects = CustomManager()\n _field_updates = FieldTracker()\n\n def has_changes(self):\n return bool(self._field_updates.changed())\n\n\n class Meta:\n 
indexes = [models.Index(fields=['assessment_id'], name=\n ASSESSMENT_ID_INDEX_NAME)]\n unique_together = ['contentnode', 'assessment_id']\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('contentnode__tree_id'\n )).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n def on_create(self):\n \"\"\"\n When an exercise is added to a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n \"\"\"\n When an exercise is updated of a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n When an exercise is deleted from a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n return super(AssessmentItem, self).delete(*args, 
**kwargs)\n\n\nclass SlideshowSlide(models.Model):\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'slideshow_slides', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n sort_order = models.FloatField(default=1.0)\n metadata = JSONField(default=dict)\n\n\nclass StagedFile(models.Model):\n \"\"\"\n Keeps track of files uploaded through Ricecooker to avoid user going over disk quota limit\n \"\"\"\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='staged_files',\n blank=True, null=True, on_delete=models.CASCADE)\n\n\n<mask token>\n\n\nclass File(models.Model):\n \"\"\"\n The bottom layer of the contentDB schema, defines the basic building brick for content.\n Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n file_on_disk = models.FileField(upload_to=object_storage_name, storage=\n default_storage, max_length=500, blank=True)\n contentnode = models.ForeignKey(ContentNode, related_name='files',\n blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n assessment_item = models.ForeignKey(AssessmentItem, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n slideshow_slide = models.ForeignKey(SlideshowSlide, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n file_format = models.ForeignKey(FileFormat, related_name='files', blank\n =True, null=True, db_index=True, on_delete=models.SET_NULL)\n preset = models.ForeignKey(FormatPreset, related_name='files', blank=\n True, null=True, db_index=True, on_delete=models.SET_NULL)\n language = models.ForeignKey(Language, related_name='files', 
blank=True,\n null=True, on_delete=models.SET_NULL)\n original_filename = models.CharField(max_length=255, blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n modified = models.DateTimeField(auto_now=True, verbose_name='modified',\n null=True)\n duration = models.IntegerField(blank=True, null=True)\n objects = CustomManager()\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id')) | Q(\n tree_id=OuterRef('assessment_item__contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls.\n _permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(edit=True) | Q(uploaded_by=user,\n contentnode__isnull=True, assessment_item__isnull=True))\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True).filter(Q(main_tree__tree_id=OuterRef(\n 'contentnode__tree_id')) | Q(main_tree__tree_id=OuterRef(\n 'assessment_item__contentnode__tree_id'))).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True) |\n Q(uploaded_by=user, contentnode__isnull=True,\n 
assessment_item__isnull=True))\n\n\n class Admin:\n pass\n\n def __str__(self):\n return '{checksum}{extension}'.format(checksum=self.checksum,\n extension='.' + self.file_format.extension)\n\n def filename(self):\n \"\"\"\n Returns just the filename of the File in storage, without the path\n\n e.g. abcd.mp4\n \"\"\"\n return os.path.basename(self.file_on_disk.name)\n\n def update_contentnode_content_id(self):\n \"\"\"\n If the file is attached to a contentnode and is not a thumbnail\n then update that contentnode's content_id if it's a copied contentnode.\n \"\"\"\n if self.contentnode and self.preset.thumbnail is False:\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n self.modified = timezone.now()\n self.update_contentnode_content_id()\n\n def save(self, set_by_file_on_disk=True, *args, **kwargs):\n \"\"\"\n Overrider the default save method.\n If the file_on_disk FileField gets passed a content copy:\n 1. generate the MD5 from the content copy\n 2. fill the other fields accordingly\n \"\"\"\n from contentcuration.utils.user import calculate_user_storage\n if self.file_format_id:\n if self.file_format_id not in dict(file_formats.choices):\n raise ValidationError('Invalid file_format')\n if set_by_file_on_disk and self.file_on_disk:\n if self.checksum is None or self.checksum == '':\n md5 = hashlib.md5()\n for chunk in self.file_on_disk.chunks():\n md5.update(chunk)\n self.checksum = md5.hexdigest()\n if not self.file_size:\n self.file_size = self.file_on_disk.size\n if not self.file_format_id:\n ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')\n if ext in list(dict(file_formats.choices).keys()):\n self.file_format_id = ext\n else:\n raise ValueError('Files of type `{}` are not supported.'\n .format(ext))\n super(File, self).save(*args, **kwargs)\n if self.uploaded_by_id:\n calculate_user_storage(self.uploaded_by_id)\n\n\n class Meta:\n indexes = [models.Index(fields=['checksum', 'file_size'], name=\n FILE_DISTINCT_INDEX_NAME), 
models.Index(fields=['-modified'],\n name=FILE_MODIFIED_DESC_INDEX_NAME)]\n constraints = [models.CheckConstraint(check=Q(preset__in=\n MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True), name\n =FILE_DURATION_CONSTRAINT)]\n\n\n<mask token>\n\n\nclass PrerequisiteContentRelationship(models.Model):\n \"\"\"\n Predefine the prerequisite relationship between two ContentNode objects.\n \"\"\"\n target_node = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)\n prerequisite = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['target_node', 'prerequisite']\n\n def clean(self, *args, **kwargs):\n if self.target_node == self.prerequisite:\n raise IntegrityError('Cannot self reference as prerequisite.')\n if PrerequisiteContentRelationship.objects.using(self._state.db\n ).filter(target_node=self.prerequisite, prerequisite=self.\n target_node):\n raise IntegrityError(\n 'Note: Prerequisite relationship is directional! 
%s and %s cannot be prerequisite of each other!'\n % (self.target_node, self.prerequisite))\n super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n super(PrerequisiteContentRelationship, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n\nclass RelatedContentRelationship(models.Model):\n \"\"\"\n Predefine the related relationship between two ContentNode objects.\n \"\"\"\n contentnode_1 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_1', on_delete=models.CASCADE)\n contentnode_2 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_2', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['contentnode_1', 'contentnode_2']\n\n def save(self, *args, **kwargs):\n if self.contentnode_1 == self.contentnode_2:\n raise IntegrityError('Cannot self reference as related.')\n if RelatedContentRelationship.objects.using(self._state.db).filter(\n contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1\n ):\n return\n super(RelatedContentRelationship, self).save(*args, **kwargs)\n\n\nclass Invitation(models.Model):\n \"\"\" Invitation to edit channel \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n accepted = models.BooleanField(default=False)\n declined = models.BooleanField(default=False)\n revoked = models.BooleanField(default=False)\n invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n SET_NULL, null=True, related_name='sent_to')\n share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)\n email = models.EmailField(max_length=100, null=True)\n sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\n 'sent_by', null=True, on_delete=models.CASCADE)\n channel = models.ForeignKey('Channel', null=True, related_name=\n 'pending_editors', on_delete=models.CASCADE)\n first_name = models.CharField(max_length=100, blank=True)\n last_name = 
models.CharField(max_length=100, blank=True, null=True)\n\n\n class Meta:\n verbose_name = 'Invitation'\n verbose_name_plural = 'Invitations'\n\n def accept(self):\n user = User.objects.filter(email__iexact=self.email).first()\n if self.channel:\n if self.share_mode == VIEW_ACCESS:\n self.channel.editors.remove(user)\n self.channel.viewers.add(user)\n else:\n self.channel.viewers.remove(user)\n self.channel.editors.add(user)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user)).distinct()\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user) | Q(channel__viewers=user)).distinct()\n\n\nclass Change(models.Model):\n server_rev = models.BigAutoField(primary_key=True)\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,\n blank=True, on_delete=models.SET_NULL, related_name='changes_by_user')\n channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=\n models.CASCADE)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=\n True, on_delete=models.CASCADE, related_name='changes_about_user')\n client_rev = models.IntegerField(null=True, blank=True)\n session = models.ForeignKey(Session, null=True, blank=True, on_delete=\n models.SET_NULL)\n table = models.CharField(max_length=32)\n change_type = models.IntegerField()\n kwargs = JSONField(encoder=JSONEncoder)\n applied = models.BooleanField(default=False)\n errored = models.BooleanField(default=False)\n\n @classmethod\n def _create_from_change(cls, created_by_id=None, channel_id=None,\n user_id=None, session_key=None, applied=False, table=None, rev=None,\n **data):\n change_type = 
data.pop('type')\n if table is None or table not in ALL_TABLES:\n raise TypeError(\n 'table is a required argument for creating changes and must be a valid table name'\n )\n if change_type is None or change_type not in ALL_CHANGES:\n raise TypeError(\n 'change_type is a required argument for creating changes and must be a valid change type integer'\n )\n return cls(session_id=session_key, created_by_id=created_by_id,\n channel_id=channel_id, user_id=user_id, client_rev=rev, table=\n table, change_type=change_type, kwargs=data, applied=applied)\n\n @classmethod\n def create_changes(cls, changes, created_by_id=None, session_key=None,\n applied=False):\n change_models = []\n for change in changes:\n change_models.append(cls._create_from_change(created_by_id=\n created_by_id, session_key=session_key, applied=applied, **\n change))\n cls.objects.bulk_create(change_models)\n return change_models\n\n @classmethod\n def create_change(cls, change, created_by_id=None, session_key=None,\n applied=False):\n obj = cls._create_from_change(created_by_id=created_by_id,\n session_key=session_key, applied=applied, **change)\n obj.save()\n return obj\n\n @classmethod\n def serialize(cls, change):\n datum = get_attribute(change, ['kwargs']).copy()\n datum.update({'server_rev': get_attribute(change, ['server_rev']),\n 'table': get_attribute(change, ['table']), 'type':\n get_attribute(change, ['change_type']), 'channel_id':\n get_attribute(change, ['channel_id']), 'user_id': get_attribute\n (change, ['user_id']), 'created_by_id': get_attribute(change, [\n 'created_by_id'])})\n return datum\n\n def serialize_to_change_dict(self):\n return self.serialize(self)\n\n\nclass TaskResultCustom(object):\n \"\"\"\n Custom fields to add to django_celery_results's TaskResult model\n\n If adding fields to this class, run `makemigrations` then move the generated migration from the\n `django_celery_results` app to the `contentcuration` app and override the constructor to change\n the app_label. 
See `0141_add_task_signature` for an example\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='tasks',\n on_delete=models.CASCADE, null=True)\n channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)\n progress = models.IntegerField(null=True, blank=True, validators=[\n MinValueValidator(0), MaxValueValidator(100)])\n signature = models.CharField(null=True, blank=False, max_length=32)\n super_as_dict = TaskResult.as_dict\n\n def as_dict(self):\n \"\"\"\n :return: A dictionary representation\n \"\"\"\n super_dict = self.super_as_dict()\n super_dict.update(user_id=self.user_id, channel_id=self.channel_id,\n progress=self.progress)\n return super_dict\n\n @classmethod\n def contribute_to_class(cls, model_class=TaskResult):\n \"\"\"\n Adds fields to model, by default TaskResult\n :param model_class: TaskResult model\n \"\"\"\n for field in dir(cls):\n if not field.startswith('_') and field not in (\n 'contribute_to_class', 'Meta'):\n model_class.add_to_class(field, getattr(cls, field))\n setattr(model_class._meta, 'indexes', getattr(model_class._meta,\n 'indexes', []) + cls.Meta.indexes)\n\n\n class Meta:\n indexes = [models.Index(fields=['signature'], name=\n 'task_result_signature_idx', condition=Q(status__in=\n celery_states.UNREADY_STATES))]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SecretToken(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def exists(cls, token):\n \"\"\"\n Return true when the token string given by string already exists.\n Returns false otherwise.\n \"\"\"\n return cls.objects.filter(token=token).exists()\n\n @classmethod\n def generate_new_token(cls):\n \"\"\"\n Creates a primary secret token for the current channel using a proquint\n string. Creates a secondary token containing the channel id.\n\n These tokens can be used to refer to the channel to download its content\n database.\n \"\"\"\n token = proquint.generate()\n TRIALS = 100\n for __ in range(TRIALS):\n token = proquint.generate()\n if SecretToken.exists(token):\n continue\n break\n else:\n raise ValueError('Cannot generate new token')\n return token\n\n def __str__(self):\n return '{}-{}'.format(self.token[:5], self.token[5:])\n\n\n<mask token>\n\n\nclass PermissionCTE(With):\n tree_id_fields = ['channel__{}__tree_id'.format(tree_name) for\n tree_name in CHANNEL_TREES]\n\n def __init__(self, model, user_id, **kwargs):\n queryset = model.objects.filter(user_id=user_id).annotate(tree_id=\n Unnest(ArrayRemove(Array(*self.tree_id_fields), None),\n output_field=models.IntegerField()))\n super(PermissionCTE, self).__init__(queryset=queryset.values(\n 'user_id', 'channel_id', 'tree_id'), **kwargs)\n\n @classmethod\n def editable_channels(cls, user_id):\n return PermissionCTE(User.editable_channels.through, user_id, name=\n 'editable_channels_cte')\n\n @classmethod\n def view_only_channels(cls, user_id):\n return PermissionCTE(User.view_only_channels.through, user_id, name\n ='view_only_channels_cte')\n\n def exists(self, *filters):\n return Exists(self.queryset().filter(*filters).values('user_id'))\n\n\nclass Channel(models.Model):\n \"\"\" Permissions come from association with organizations \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n name = models.CharField(max_length=200, 
blank=True)\n description = models.CharField(max_length=400, blank=True)\n tagline = models.CharField(max_length=150, blank=True, null=True)\n version = models.IntegerField(default=0)\n thumbnail = models.TextField(blank=True, null=True)\n thumbnail_encoding = JSONField(default=dict)\n editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name\n ='editable_channels', verbose_name='editors', help_text=\n 'Users with edit rights', blank=True)\n viewers = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name\n ='view_only_channels', verbose_name='viewers', help_text=\n 'Users with view only rights', blank=True)\n language = models.ForeignKey('Language', null=True, blank=True,\n related_name='channel_language', on_delete=models.SET_NULL)\n trash_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_trash', on_delete=models.SET_NULL)\n clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_clipboard', on_delete=models.SET_NULL)\n main_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_main', on_delete=models.SET_NULL)\n staging_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_staging', on_delete=models.SET_NULL)\n chef_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_chef', on_delete=models.SET_NULL)\n previous_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_previous', on_delete=models.SET_NULL)\n bookmarked_by = models.ManyToManyField(settings.AUTH_USER_MODEL,\n related_name='bookmarked_channels', verbose_name='bookmarked by')\n deleted = models.BooleanField(default=False, db_index=True)\n public = models.BooleanField(default=False, db_index=True)\n preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)\n content_defaults = JSONField(default=dict)\n priority = models.IntegerField(default=0, help_text=\n 'Order to 
display public channels')\n last_published = models.DateTimeField(blank=True, null=True)\n secret_tokens = models.ManyToManyField(SecretToken, related_name=\n 'channels', verbose_name='secret tokens', blank=True)\n source_url = models.CharField(max_length=200, blank=True, null=True)\n demo_server_url = models.CharField(max_length=200, blank=True, null=True)\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n ricecooker_version = models.CharField(max_length=100, blank=True, null=True\n )\n published_data = JSONField(default=dict)\n icon_encoding = models.TextField(blank=True, null=True)\n total_resource_count = models.IntegerField(default=0)\n published_kind_count = models.TextField(blank=True, null=True)\n published_size = models.FloatField(default=0)\n included_languages = models.ManyToManyField('Language', related_name=\n 'channels', verbose_name='languages', blank=True)\n _field_updates = FieldTracker(fields=['description', 'language_id',\n 'thumbnail', 'name', 'thumbnail_encoding', 'deleted', 'public',\n 'main_tree_id', 'version'])\n\n @classmethod\n def get_editable(cls, user, channel_id):\n return cls.filter_edit_queryset(cls.objects.all(), user).get(id=\n channel_id)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit = Exists(User.editable_channels.through.objects.filter(user_id\n =user_id, channel_id=OuterRef('id')))\n queryset = queryset.annotate(edit=edit)\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n user_email = not user.is_anonymous and user.email\n if user_id:\n filters = dict(user_id=user_id, channel_id=OuterRef('id'))\n edit = Exists(User.editable_channels.through.objects.filter(**\n 
filters).values('user_id'))\n view = Exists(User.view_only_channels.through.objects.filter(**\n filters).values('user_id'))\n else:\n edit = boolean_val(False)\n view = boolean_val(False)\n queryset = queryset.annotate(edit=edit, view=view)\n if user_id and user.is_admin:\n return queryset\n permission_filter = Q()\n if user_id:\n pending_channels = Invitation.objects.filter(email=user_email,\n revoked=False, declined=False, accepted=False).values_list(\n 'channel_id', flat=True)\n permission_filter = Q(view=True) | Q(edit=True) | Q(deleted=\n False, id__in=pending_channels)\n return queryset.filter(permission_filter | Q(deleted=False, public=\n True))\n\n @classmethod\n def get_all_channels(cls):\n return cls.objects.select_related('main_tree').prefetch_related(\n 'editors', 'viewers').distinct()\n\n def resource_size_key(self):\n return '{}_resource_size'.format(self.pk)\n\n def get_resource_size(self):\n cached_data = cache.get(self.resource_size_key())\n if cached_data:\n return cached_data\n tree_id = self.main_tree.tree_id\n files = File.objects.select_related('contentnode', 'assessment_item'\n ).filter(contentnode__tree_id=tree_id).values('checksum',\n 'file_size').distinct().aggregate(resource_size=Sum('file_size'))\n cache.set(self.resource_size_key(), files['resource_size'] or 0, None)\n return files['resource_size'] or 0\n\n def on_create(self):\n record_channel_stats(self, None)\n if not self.content_defaults:\n self.content_defaults = DEFAULT_CONTENT_DEFAULTS\n if not self.main_tree:\n self.main_tree = ContentNode.objects.create(title=self.name,\n kind_id=content_kinds.TOPIC, content_id=self.id, node_id=\n self.id, original_channel_id=self.id, source_channel_id=\n self.id, changed=True, complete=True)\n if settings.DEBUG:\n if ContentNode.objects.filter(parent=None, tree_id=self.\n main_tree.tree_id).count() != 1:\n raise AssertionError\n if not self.trash_tree:\n self.trash_tree = ContentNode.objects.create(title=self.name,\n 
kind_id=content_kinds.TOPIC, content_id=self.id, node_id=\n self.id)\n if self.public and (self.main_tree and self.main_tree.published):\n delete_public_channel_cache_keys()\n\n def on_update(self):\n from contentcuration.utils.user import calculate_user_storage\n original_values = self._field_updates.changed()\n record_channel_stats(self, original_values)\n blacklist = set(['public', 'main_tree_id', 'version'])\n if self.main_tree and original_values and any(True for field in\n original_values if field not in blacklist):\n self.main_tree.changed = True\n if 'thumbnail' in original_values and original_values['thumbnail'\n ] and 'static' not in original_values['thumbnail']:\n filename, ext = os.path.splitext(original_values['thumbnail'])\n delete_empty_file_reference(filename, ext[1:])\n if 'deleted' in original_values:\n for editor in self.editors.all():\n calculate_user_storage(editor.pk)\n if 'deleted' in original_values and not original_values['deleted']:\n self.pending_editors.all().delete()\n export_db_storage_path = os.path.join(settings.DB_ROOT,\n '{channel_id}.sqlite3'.format(channel_id=self.id))\n if default_storage.exists(export_db_storage_path):\n default_storage.delete(export_db_storage_path)\n if self.main_tree:\n self.main_tree.published = False\n if self.main_tree and self.main_tree._field_updates.changed():\n self.main_tree.save()\n if 'public' in original_values and (self.main_tree and self.\n main_tree.published):\n delete_public_channel_cache_keys()\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n super(Channel, self).save(*args, **kwargs)\n\n def get_thumbnail(self):\n return get_channel_thumbnail(self)\n\n def has_changes(self):\n return self.main_tree.get_descendants(include_self=True).filter(changed\n =True).exists()\n\n def get_date_modified(self):\n return self.main_tree.get_descendants(include_self=True).aggregate(\n last_modified=Max('modified'))['last_modified']\n\n def 
get_resource_count(self):\n return self.main_tree.get_descendants().exclude(kind_id=\n content_kinds.TOPIC).order_by('content_id').distinct('content_id'\n ).count()\n\n def get_human_token(self):\n return self.secret_tokens.get(is_primary=True)\n\n def get_channel_id_token(self):\n return self.secret_tokens.get(token=self.id)\n\n def make_token(self):\n token = self.secret_tokens.create(token=SecretToken.\n generate_new_token(), is_primary=True)\n self.secret_tokens.get_or_create(token=self.id)\n return token\n\n def make_public(self, bypass_signals=False):\n \"\"\"\n Sets the current channel object to be public and viewable by anyone.\n\n If bypass_signals is True, update the model in such a way that we\n prevent any model signals from running due to the update.\n\n Returns the same channel object.\n \"\"\"\n if bypass_signals:\n self.public = True\n Channel.objects.filter(id=self.id).update(public=True)\n delete_public_channel_cache_keys()\n else:\n self.public = True\n self.save()\n return self\n\n def mark_created(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n CREATION)\n\n def mark_publishing(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n PUBLICATION)\n self.main_tree.publishing = True\n self.main_tree.save()\n\n def mark_deleted(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n DELETION)\n self.deleted = True\n self.save()\n\n def mark_recovered(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n RECOVERY)\n self.deleted = False\n self.save()\n\n @property\n def deletion_history(self):\n return self.history.filter(action=channel_history.DELETION)\n\n @property\n def publishing_history(self):\n return self.history.filter(action=channel_history.PUBLICATION)\n\n @classmethod\n def get_public_channels(cls, defer_nonmain_trees=False):\n \"\"\"\n Get all public channels.\n\n If defer_nonmain_trees is True, defer the loading of 
all\n trees except for the main_tree.\"\"\"\n if defer_nonmain_trees:\n c = Channel.objects.filter(public=True).exclude(deleted=True\n ).select_related('main_tree').prefetch_related('editors'\n ).defer('trash_tree', 'clipboard_tree', 'staging_tree',\n 'chef_tree', 'previous_tree', 'viewers')\n else:\n c = Channel.objects.filter(public=True).exclude(deleted=True)\n return c\n\n\n class Meta:\n verbose_name = 'Channel'\n verbose_name_plural = 'Channels'\n indexes = [models.Index(fields=['name'], name=CHANNEL_NAME_INDEX_NAME)]\n index_together = [['deleted', 'public']]\n\n\n<mask token>\n\n\nclass ChannelHistory(models.Model):\n \"\"\"\n Model for tracking certain actions performed on a channel\n \"\"\"\n channel = models.ForeignKey('Channel', null=False, blank=False,\n related_name='history', on_delete=models.CASCADE)\n actor = models.ForeignKey('User', null=False, blank=False, related_name\n ='channel_history', on_delete=models.CASCADE)\n performed = models.DateTimeField(default=timezone.now)\n action = models.CharField(max_length=50, choices=channel_history.choices)\n\n @classmethod\n def prune(cls):\n \"\"\"\n Prunes history records by keeping the most recent actions for each channel and type,\n and deleting all other older actions\n \"\"\"\n keep_ids = cls.objects.distinct('channel_id', 'action').order_by(\n 'channel_id', 'action', '-performed').values_list('id', flat=True)\n cls.objects.exclude(id__in=keep_ids).delete()\n\n\n class Meta:\n verbose_name = 'Channel history'\n verbose_name_plural = 'Channel histories'\n indexes = [models.Index(fields=['channel_id'], name=\n CHANNEL_HISTORY_CHANNEL_INDEX_NAME)]\n\n\nclass UserHistory(models.Model):\n \"\"\"\n Model that stores the user's action history.\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=\n False, related_name='history', on_delete=models.CASCADE)\n action = models.CharField(max_length=32, choices=user_history.choices)\n performed_at = 
models.DateTimeField(default=timezone.now)\n\n\nclass ChannelSet(models.Model):\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n name = models.CharField(max_length=200, blank=True)\n description = models.CharField(max_length=400, blank=True)\n public = models.BooleanField(default=False, db_index=True)\n editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name\n ='channel_sets', verbose_name='editors', help_text=\n 'Users with edit rights', blank=True)\n secret_token = models.ForeignKey('SecretToken', null=True, blank=True,\n related_name='channel_sets', on_delete=models.SET_NULL)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n user_id = not user.is_anonymous and user.id\n edit = Exists(User.channel_sets.through.objects.filter(user_id=\n user_id, channelset_id=OuterRef('id')))\n queryset = queryset.annotate(edit=edit)\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n return cls.filter_edit_queryset(queryset, user)\n\n def get_channels(self):\n if self.secret_token:\n return self.secret_token.channels.filter(deleted=False)\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n super(ChannelSet, self).save()\n\n def on_create(self):\n if not self.secret_token:\n self.secret_token = SecretToken.objects.create(token=\n SecretToken.generate_new_token())\n\n def delete(self, *args, **kwargs):\n super(ChannelSet, self).delete(*args, **kwargs)\n if self.secret_token:\n self.secret_token.delete()\n\n\nclass ContentTag(models.Model):\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n tag_name = models.CharField(max_length=50)\n channel = models.ForeignKey('Channel', related_name='tags', blank=True,\n null=True, db_index=True, on_delete=models.SET_NULL)\n objects = CustomManager()\n\n def __str__(self):\n return self.tag_name\n\n\n class Meta:\n unique_together = 
['tag_name', 'channel']\n\n\nclass License(models.Model):\n \"\"\"\n Normalize the license of ContentNode model\n \"\"\"\n license_name = models.CharField(max_length=50)\n license_url = models.URLField(blank=True)\n license_description = models.TextField(blank=True)\n copyright_holder_required = models.BooleanField(default=True)\n is_custom = models.BooleanField(default=False)\n exists = models.BooleanField(default=False, verbose_name=\n 'license exists', help_text=\n 'Tells whether or not a content item is licensed to share')\n\n @classmethod\n def validate_name(cls, name):\n if cls.objects.filter(license_name=name).count() == 0:\n raise ValidationError('License `{}` does not exist'.format(name))\n\n def __str__(self):\n return self.license_name\n\n\n<mask token>\n\n\nclass ContentNode(MPTTModel, models.Model):\n \"\"\"\n By default, all nodes have a title and can be used as a topic.\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=\n False, db_index=True)\n node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)\n original_channel_id = UUIDField(primary_key=False, editable=False, null\n =True, db_index=True)\n source_channel_id = UUIDField(primary_key=False, editable=False, null=True)\n original_source_node_id = UUIDField(primary_key=False, editable=False,\n null=True, db_index=True)\n source_node_id = UUIDField(primary_key=False, editable=False, null=True)\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n title = models.CharField(max_length=200, blank=True)\n description = models.TextField(blank=True)\n kind = models.ForeignKey('ContentKind', related_name='contentnodes',\n db_index=True, null=True, blank=True, on_delete=models.SET_NULL)\n license = models.ForeignKey('License', null=True, blank=True, on_delete\n =models.SET_NULL)\n license_description = 
models.CharField(max_length=400, null=True, blank\n =True)\n prerequisite = models.ManyToManyField('self', related_name=\n 'is_prerequisite_of', through='PrerequisiteContentRelationship',\n symmetrical=False, blank=True)\n is_related = models.ManyToManyField('self', related_name='relate_to',\n through='RelatedContentRelationship', symmetrical=False, blank=True)\n language = models.ForeignKey('Language', null=True, blank=True,\n related_name='content_language', on_delete=models.SET_NULL)\n parent = TreeForeignKey('self', null=True, blank=True, related_name=\n 'children', db_index=True, on_delete=models.CASCADE)\n tags = models.ManyToManyField(ContentTag, symmetrical=False,\n related_name='tagged_content', blank=True)\n sort_order = models.FloatField(max_length=50, default=1, verbose_name=\n 'sort order', help_text='Ascending, lowest number shown first')\n copyright_holder = models.CharField(max_length=200, null=True, blank=\n True, default='', help_text=\n 'Organization of person who holds the essential rights')\n original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=\n True, blank=True, related_name='duplicates')\n cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=\n True, blank=True, related_name='clones')\n thumbnail_encoding = models.TextField(blank=True, null=True)\n created = models.DateTimeField(default=timezone.now, verbose_name='created'\n )\n modified = models.DateTimeField(auto_now=True, verbose_name='modified')\n published = models.BooleanField(default=False)\n publishing = models.BooleanField(default=False)\n complete = models.BooleanField(null=True)\n changed = models.BooleanField(default=True)\n \"\"\"\n Extra fields for exercises:\n - type: mastery model to use to determine completion\n - m: m value for M out of N mastery criteria\n - n: n value for M out of N mastery criteria\n \"\"\"\n extra_fields = JSONField(default=dict, blank=True, null=True)\n author = models.CharField(max_length=200, blank=True, 
default='',\n help_text='Who created this content?', null=True)\n aggregator = models.CharField(max_length=200, blank=True, default='',\n help_text='Who gathered this content together?', null=True)\n provider = models.CharField(max_length=200, blank=True, default='',\n help_text='Who distributed this content?', null=True)\n role_visibility = models.CharField(max_length=50, choices=roles.choices,\n default=roles.LEARNER)\n freeze_authoring_data = models.BooleanField(default=False)\n grade_levels = models.JSONField(blank=True, null=True)\n resource_types = models.JSONField(blank=True, null=True)\n learning_activities = models.JSONField(blank=True, null=True)\n accessibility_labels = models.JSONField(blank=True, null=True)\n categories = models.JSONField(blank=True, null=True)\n learner_needs = models.JSONField(blank=True, null=True)\n suggested_duration = models.IntegerField(blank=True, null=True,\n help_text='Suggested duration for the content node (in seconds)')\n objects = CustomContentNodeTreeManager()\n _field_updates = FieldTracker()\n _permission_filter = Q(tree_id=OuterRef('tree_id'))\n\n @classmethod\n def _annotate_channel_id(cls, queryset):\n return queryset.annotate(channel_id=Subquery(Channel.objects.filter\n (main_tree__tree_id=OuterRef('tree_id')).values_list('id', flat\n =True)[:1]))\n\n @classmethod\n def filter_by_pk(cls, pk):\n \"\"\"\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always\n returns a queryset filtered by pk.\n\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode\n for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. 
If\n a ContentNode does not exist for `pk` then an empty queryset is returned.\n \"\"\"\n query = ContentNode.objects.filter(pk=pk)\n if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:\n tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))\n if tree_id:\n query = query.filter(tree_id=tree_id)\n else:\n tree_id = ContentNode.objects.filter(pk=pk).values_list(\n 'tree_id', flat=True).first()\n if tree_id:\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk),\n tree_id, None)\n query = query.filter(tree_id=tree_id)\n else:\n query = query.none()\n return query\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('tree_id')).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n @raise_if_unsaved\n def get_root(self):\n if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n return self\n return super(ContentNode, self).get_root()\n\n @raise_if_unsaved\n def get_root_id(self):\n if self.is_root_node() and self.kind_id != 
content_kinds.TOPIC:\n return self\n return ContentNode.objects.values_list('pk', flat=True).get(tree_id\n =self._mpttfield('tree_id'), parent=None)\n\n def get_tree_data(self, levels=float('inf')):\n \"\"\"\n Returns `levels`-deep tree information starting at current node.\n Args:\n levels (int): depth of tree hierarchy to return\n Returns:\n tree (dict): starting with self, with children list containing either\n the just the children's `node_id`s or full recusive tree.\n \"\"\"\n if self.kind_id == content_kinds.TOPIC:\n node_data = {'title': self.title, 'kind': self.kind_id,\n 'node_id': self.node_id, 'studio_id': self.id}\n children = self.children.all()\n if levels > 0:\n node_data['children'] = [c.get_tree_data(levels=levels - 1) for\n c in children]\n return node_data\n if self.kind_id == content_kinds.EXERCISE:\n return {'title': self.title, 'kind': self.kind_id, 'count':\n self.assessment_items.count(), 'node_id': self.node_id,\n 'studio_id': self.id}\n return {'title': self.title, 'kind': self.kind_id, 'file_size':\n self.files.values('file_size').aggregate(size=Sum('file_size'))\n ['size'], 'node_id': self.node_id, 'studio_id': self.id}\n\n def get_original_node(self):\n original_node = self.original_node or self\n if self.original_channel_id and self.original_source_node_id:\n original_tree_id = Channel.objects.select_related('main_tree').get(\n pk=self.original_channel_id).main_tree.tree_id\n original_node = ContentNode.objects.filter(tree_id=\n original_tree_id, node_id=self.original_source_node_id).first(\n ) or ContentNode.objects.filter(tree_id=original_tree_id,\n content_id=self.content_id).first() or self\n return original_node\n\n def get_associated_presets(self):\n key = 'associated_presets_{}'.format(self.kind_id)\n cached_data = cache.get(key)\n if cached_data:\n return cached_data\n presets = list(FormatPreset.objects.filter(kind=self.kind).values())\n cache.set(key, presets, None)\n return presets\n\n def get_prerequisites(self):\n 
prerequisite_mapping = {}\n prerequisites = self.prerequisite.all()\n prereqlist = list(prerequisites)\n for prereq in prerequisites:\n prlist, prereqmapping = prereq.get_prerequisites()\n prerequisite_mapping.update({prereq.pk: prereqmapping})\n prereqlist.extend(prlist)\n return prereqlist, prerequisite_mapping\n\n def get_postrequisites(self):\n postrequisite_mapping = {}\n postrequisites = self.is_prerequisite_of.all()\n postreqlist = list(postrequisites)\n for postreq in postrequisites:\n prlist, postreqmapping = postreq.get_postrequisites()\n postrequisite_mapping.update({postreq.pk: postreqmapping})\n postreqlist.extend(prlist)\n return postreqlist, postrequisite_mapping\n\n def get_channel_id(self):\n if hasattr(self, 'channel_id'):\n return self.channel_id\n channel = self.get_channel()\n if channel:\n return channel.id\n return None\n\n def get_channel(self):\n try:\n root = self.get_root()\n if not root:\n return None\n return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=\n root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(\n previous_tree=root)).first()\n except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):\n return None\n\n def get_thumbnail(self):\n if self.thumbnail_encoding:\n thumbnail_data = load_json_string(self.thumbnail_encoding)\n if type(thumbnail_data) is dict and thumbnail_data.get('base64'):\n return thumbnail_data['base64']\n thumbnail = self.files.filter(preset__thumbnail=True).first()\n if thumbnail:\n return generate_storage_url(str(thumbnail))\n return ''\n\n @classmethod\n def get_nodes_with_title(cls, title, limit_to_children_of=None):\n \"\"\"\n Returns all ContentNodes with a given title. 
If limit_to_children_of\n is passed in with an id, only look at all the children of the node with that id.\n \"\"\"\n if limit_to_children_of:\n root = cls.objects.get(id=limit_to_children_of)\n return root.get_descendants().filter(title=title)\n return cls.objects.filter(title=title)\n\n def get_details(self, channel_id=None):\n \"\"\"\n Returns information about the node and its children, including total size, languages, files, etc.\n\n :return: A dictionary with detailed statistics and information about the node.\n \"\"\"\n from contentcuration.viewsets.common import SQArrayAgg\n from contentcuration.viewsets.common import SQCount\n from contentcuration.viewsets.common import SQRelatedArrayAgg\n from contentcuration.viewsets.common import SQSum\n from contentcuration.viewsets.common import SQJSONBKeyArrayAgg\n node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id\n ).order_by()\n descendants = self.get_descendants().values('id')\n if channel_id:\n channel = Channel.objects.filter(id=channel_id)[0]\n else:\n channel = self.get_channel()\n if not descendants.exists():\n data = {'last_update': pytz.utc.localize(datetime.now()).\n strftime(settings.DATE_TIME_FORMAT), 'created': self.\n created.strftime(settings.DATE_TIME_FORMAT),\n 'resource_count': 0, 'resource_size': 0, 'includes': {\n 'coach_content': 0, 'exercises': 0}, 'kind_count': [],\n 'languages': [], 'accessible_languages': [], 'licenses': [],\n 'tags': [], 'copyright_holders': [], 'authors': [],\n 'aggregators': [], 'providers': [], 'sample_pathway': [],\n 'original_channels': [], 'sample_nodes': [], 'levels': [],\n 'categories': []}\n cache.set('details_{}'.format(self.node_id), json.dumps(data), None\n )\n return data\n resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()\n nodes = With(File.objects.filter(contentnode_id__in=Subquery(\n resources.values('id'))).values('checksum', 'file_size').\n order_by(), name='nodes')\n file_query = 
nodes.queryset().with_cte(nodes).values('checksum',\n 'file_size').distinct()\n l_nodes = With(File.objects.filter(contentnode_id__in=Subquery(\n resources.values('id'))).values('language_id', 'preset_id').\n order_by(), name='l_nodes')\n accessible_languages_query = l_nodes.queryset().filter(preset_id=\n format_presets.VIDEO_SUBTITLE).with_cte(l_nodes).values(\n 'language__native_name').distinct()\n tags_query = str(ContentTag.objects.filter(tagged_content__pk__in=\n descendants.values_list('pk', flat=True)).values('tag_name').\n annotate(count=Count('tag_name')).query).replace('topic', \"'topic'\"\n )\n kind_count_query = str(resources.values('kind_id').annotate(count=\n Count('kind_id')).query).replace('topic', \"'topic'\")\n node = node.annotate(resource_count=SQCount(resources, field='id'),\n resource_size=SQSum(file_query, field='file_size'),\n copyright_holders=SQArrayAgg(resources.distinct(\n 'copyright_holder').order_by('copyright_holder'), field=\n 'copyright_holder'), authors=SQArrayAgg(resources.distinct(\n 'author').order_by('author'), field='author'), aggregators=\n SQArrayAgg(resources.distinct('aggregator').order_by(\n 'aggregator'), field='aggregator'), providers=SQArrayAgg(\n resources.distinct('provider').order_by('provider'), field=\n 'provider'), languages=SQRelatedArrayAgg(descendants.exclude(\n language=None).distinct('language__native_name').order_by(),\n field='language__native_name', fieldname='native_name'),\n accessible_languages=SQRelatedArrayAgg(\n accessible_languages_query, field='language__native_name',\n fieldname='native_name'), licenses=SQRelatedArrayAgg(resources.\n exclude(license=None).distinct('license__license_name').\n order_by('license__license_name'), field=\n 'license__license_name', fieldname='license_name'), kind_count=\n RawSQL('SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format\n (kind_count_query), ()), tags_list=RawSQL(\n 'SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format(\n tags_query), ()), 
coach_content=SQCount(resources.filter(\n role_visibility=roles.COACH), field='id'), exercises=SQCount(\n resources.filter(kind_id=content_kinds.EXERCISE), field='id'),\n levels=SQJSONBKeyArrayAgg(descendants.exclude(\n grade_levels__isnull=True), field='grade_levels'),\n all_categories=SQJSONBKeyArrayAgg(descendants.exclude(\n categories__isnull=True), field='categories'))\n max_level = max(resources.values_list('level', flat=True).order_by(\n ).distinct() or [0])\n m_nodes = With(resources.values('id', 'level', 'tree_id', 'lft').\n order_by(), name='m_nodes')\n deepest_node_record = m_nodes.queryset().with_cte(m_nodes).filter(level\n =max_level).values('id').order_by('tree_id', 'lft').first()\n if deepest_node_record:\n deepest_node = ContentNode.objects.get(pk=deepest_node_record['id']\n )\n pathway = list(deepest_node.get_ancestors().order_by().exclude(\n parent=None).values('title', 'node_id', 'kind_id').order_by()\n ) if deepest_node_record else []\n sample_nodes = [{'node_id': n.node_id, 'title': n.title,\n 'description': n.description, 'thumbnail': n.get_thumbnail(),\n 'kind': n.kind_id} for n in deepest_node.get_siblings(\n include_self=True)[0:4]] if deepest_node_record else []\n channel_id = channel and channel.id\n originals = resources.values('original_channel_id').annotate(count=\n Count('original_channel_id')).order_by('original_channel_id')\n originals = {c['original_channel_id']: c['count'] for c in originals}\n original_channels = Channel.objects.exclude(pk=channel_id).filter(\n pk__in=originals.keys(), deleted=False).order_by()\n original_channels = [{'id': c.id, 'name': '{}{}'.format(c.name, _(\n ' (Original)') if channel_id == c.id else ''), 'thumbnail': c.\n get_thumbnail(), 'count': originals[c.id]} for c in\n original_channels]\n node = node.order_by().values('id', 'resource_count',\n 'resource_size', 'copyright_holders', 'authors', 'aggregators',\n 'providers', 'languages', 'accessible_languages',\n 'coach_content', 'licenses', 'tags_list', 
'kind_count',\n 'exercises', 'levels', 'all_categories').first()\n for_educators = {'coach_content': node['coach_content'],\n 'exercises': node['exercises']}\n data = {'last_update': pytz.utc.localize(datetime.now()).strftime(\n settings.DATE_TIME_FORMAT), 'created': self.created.strftime(\n settings.DATE_TIME_FORMAT), 'resource_count': node.get(\n 'resource_count', 0), 'resource_size': node.get('resource_size',\n 0), 'includes': for_educators, 'kind_count': node.get(\n 'kind_count') or [], 'languages': node.get('languages') or [],\n 'accessible_languages': node.get('accessible_languages') or [],\n 'licenses': node.get('licenses') or [], 'tags': node.get(\n 'tags_list') or [], 'original_channels': original_channels,\n 'sample_pathway': pathway, 'sample_nodes': sample_nodes,\n 'authors': list(filter(bool, node['authors'])), 'aggregators':\n list(filter(bool, node['aggregators'])), 'providers': list(\n filter(bool, node['providers'])), 'copyright_holders': list(\n filter(bool, node['copyright_holders'])), 'levels': node.get(\n 'levels') or [], 'categories': node.get('all_categories') or []}\n cache.set('details_{}'.format(self.node_id), json.dumps(data), None)\n return data\n\n def has_changes(self):\n mptt_opts = self._mptt_meta\n blacklist = set(['changed', 'modified', 'publishing', mptt_opts.\n tree_id_attr, mptt_opts.left_attr, mptt_opts.right_attr,\n mptt_opts.level_attr])\n original_values = self._field_updates.changed()\n return any(True for field in original_values if field not in blacklist)\n\n def recalculate_editors_storage(self):\n from contentcuration.utils.user import calculate_user_storage\n for editor in self.files.values_list('uploaded_by_id', flat=True\n ).distinct():\n calculate_user_storage(editor)\n\n def mark_complete(self):\n errors = []\n if not (bool(self.title) or self.parent_id is None):\n errors.append('Empty title')\n if self.kind_id != content_kinds.TOPIC:\n if not self.license:\n errors.append('Missing license')\n if (self.license and 
self.license.is_custom and not self.\n license_description):\n errors.append('Missing license description for custom license')\n if (self.license and self.license.copyright_holder_required and\n not self.copyright_holder):\n errors.append('Missing required copyright holder')\n if (self.kind_id != content_kinds.EXERCISE and not self.files.\n filter(preset__supplementary=False).exists()):\n errors.append('Missing default file')\n if self.kind_id == content_kinds.EXERCISE:\n if not self.assessment_items.filter(~Q(raw_data='') | ~Q(\n question='') & ~Q(answers='[]') & (Q(type=exercises.\n INPUT_QUESTION) | Q(answers__iregex='\"correct\":\\\\s*true'))\n ).exists():\n errors.append(\n 'No questions with question text and complete answers')\n criterion = self.extra_fields.get('options', {}).get(\n 'completion_criteria')\n if not (self.extra_fields.get('mastery_model') or criterion):\n errors.append('Missing mastery criterion')\n if criterion:\n try:\n completion_criteria.validate(criterion, kind=\n content_kinds.EXERCISE)\n except completion_criteria.ValidationError:\n errors.append(\n 'Mastery criterion is defined but is invalid')\n self.complete = not errors\n return errors\n\n def make_content_id_unique(self):\n \"\"\"\n If self is NOT an original contentnode (in other words, a copied contentnode)\n and a contentnode with same content_id exists then we update self's content_id.\n \"\"\"\n is_node_original = (self.original_source_node_id is None or self.\n original_source_node_id == self.node_id)\n node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(\n content_id=self.content_id)\n if not is_node_original and node_same_content_id.exists():\n ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.\n uuid4().hex)\n\n def on_create(self):\n self.changed = True\n self.recalculate_editors_storage()\n self.set_default_learning_activity()\n\n def on_update(self):\n self.changed = self.changed or self.has_changes()\n\n def move_to(self, target, *args, 
**kwargs):\n parent_was_trashtree = self.parent.channel_trash.exists()\n super(ContentNode, self).move_to(target, *args, **kwargs)\n self.save()\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.\n tree_id, None)\n if target.channel_trash.exists() or parent_was_trashtree:\n self.recalculate_editors_storage()\n\n def set_default_learning_activity(self):\n if self.learning_activities is None:\n if self.kind in kind_activity_map:\n self.learning_activities = {kind_activity_map[self.kind]: True}\n\n def save(self, skip_lock=False, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n old_parent_id = self._field_updates.changed().get('parent_id')\n if self._state.adding and (self.parent_id or self.parent):\n same_order = False\n elif old_parent_id is DeferredAttribute:\n same_order = True\n else:\n same_order = old_parent_id == self.parent_id\n if not same_order:\n changed_ids = list(filter(lambda x: x is not None, set([\n old_parent_id, self.parent_id])))\n else:\n changed_ids = []\n if not same_order and not skip_lock:\n with ContentNode.objects.lock_mptt(*ContentNode.objects.filter(\n id__in=[pid for pid in [old_parent_id, self.parent_id] if\n pid]).values_list('tree_id', flat=True).distinct()):\n super(ContentNode, self).save(*args, **kwargs)\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(\n changed=True)\n else:\n super(ContentNode, self).save(*args, **kwargs)\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(changed\n =True)\n save.alters_data = True\n\n def delete(self, *args, **kwargs):\n parent = self.parent or self._field_updates.changed().get('parent')\n if parent:\n parent.changed = True\n parent.save()\n self.recalculate_editors_storage()\n with ContentNode.objects.lock_mptt(self.tree_id):\n return super(ContentNode, self).delete(*args, **kwargs)\n delete.alters_data = True\n\n def copy_to(self, target=None, position='last-child', pk=None, mods=\n None, 
excluded_descendants=None, can_edit_source_channel=None,\n batch_size=None, progress_tracker=None):\n return self._tree_manager.copy_node(self, target, position, pk,\n mods, excluded_descendants, can_edit_source_channel, batch_size,\n progress_tracker)[0]\n\n def copy(self):\n return self.copy_to()\n\n def is_publishable(self):\n return self.complete and self.get_descendants(include_self=True\n ).exclude(kind_id=content_kinds.TOPIC).exists()\n\n\n class Meta:\n verbose_name = 'Topic'\n verbose_name_plural = 'Topics'\n indexes = [models.Index(fields=['node_id'], name=NODE_ID_INDEX_NAME\n ), models.Index(fields=['-modified'], name=\n NODE_MODIFIED_DESC_INDEX_NAME)]\n\n\nclass ContentKind(models.Model):\n kind = models.CharField(primary_key=True, max_length=200, choices=\n content_kinds.choices)\n\n def __str__(self):\n return self.kind\n\n\nclass FileFormat(models.Model):\n extension = models.CharField(primary_key=True, max_length=40, choices=\n file_formats.choices)\n mimetype = models.CharField(max_length=200, blank=True)\n\n def __str__(self):\n return self.extension\n\n\nclass FormatPreset(models.Model):\n id = models.CharField(primary_key=True, max_length=150, choices=\n format_presets.choices)\n readable_name = models.CharField(max_length=400)\n multi_language = models.BooleanField(default=False)\n supplementary = models.BooleanField(default=False)\n thumbnail = models.BooleanField(default=False)\n subtitle = models.BooleanField(default=False)\n display = models.BooleanField(default=True)\n order = models.IntegerField(default=0)\n kind = models.ForeignKey(ContentKind, related_name='format_presets',\n null=True, on_delete=models.SET_NULL)\n allowed_formats = models.ManyToManyField(FileFormat, blank=True)\n\n def __str__(self):\n return self.id\n\n @classmethod\n def guess_format_preset(cls, filename):\n \"\"\"\n Guess the format preset of a filename based on its extension.\n\n Return None if format is unknown.\n \"\"\"\n _, ext = os.path.splitext(filename)\n ext 
= ext.lstrip('.')\n f = FormatPreset.objects.filter(allowed_formats__extension=ext,\n display=True)\n return f.first()\n\n @classmethod\n def get_preset(cls, preset_name):\n \"\"\"\n Get the FormatPreset object with that exact name.\n\n Returns None if that format preset is not found.\n \"\"\"\n try:\n return FormatPreset.objects.get(id=preset_name)\n except FormatPreset.DoesNotExist:\n return None\n\n\nclass Language(models.Model):\n id = models.CharField(max_length=14, primary_key=True)\n lang_code = models.CharField(max_length=3, db_index=True)\n lang_subcode = models.CharField(max_length=10, db_index=True, blank=\n True, null=True)\n readable_name = models.CharField(max_length=100, blank=True)\n native_name = models.CharField(max_length=100, blank=True)\n lang_direction = models.CharField(max_length=3, choices=languages.\n LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])\n\n def ietf_name(self):\n return '{code}-{subcode}'.format(code=self.lang_code, subcode=self.\n lang_subcode) if self.lang_subcode else self.lang_code\n\n def __str__(self):\n return self.ietf_name()\n\n\n<mask token>\n\n\nclass AssessmentItem(models.Model):\n type = models.CharField(max_length=50, default='multiplechoice')\n question = models.TextField(blank=True)\n hints = models.TextField(default='[]')\n answers = models.TextField(default='[]')\n order = models.IntegerField(default=1)\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'assessment_items', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n assessment_id = UUIDField(primary_key=False, default=uuid.uuid4,\n editable=False)\n raw_data = models.TextField(blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n randomize = models.BooleanField(default=False)\n deleted = models.BooleanField(default=False)\n objects = CustomManager()\n _field_updates = FieldTracker()\n\n def has_changes(self):\n return bool(self._field_updates.changed())\n\n\n class Meta:\n 
indexes = [models.Index(fields=['assessment_id'], name=\n ASSESSMENT_ID_INDEX_NAME)]\n unique_together = ['contentnode', 'assessment_id']\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('contentnode__tree_id'\n )).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n def on_create(self):\n \"\"\"\n When an exercise is added to a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n \"\"\"\n When an exercise is updated of a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n When an exercise is deleted from a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n return super(AssessmentItem, self).delete(*args, 
**kwargs)\n\n\nclass SlideshowSlide(models.Model):\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'slideshow_slides', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n sort_order = models.FloatField(default=1.0)\n metadata = JSONField(default=dict)\n\n\nclass StagedFile(models.Model):\n \"\"\"\n Keeps track of files uploaded through Ricecooker to avoid user going over disk quota limit\n \"\"\"\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='staged_files',\n blank=True, null=True, on_delete=models.CASCADE)\n\n\n<mask token>\n\n\nclass File(models.Model):\n \"\"\"\n The bottom layer of the contentDB schema, defines the basic building brick for content.\n Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n file_on_disk = models.FileField(upload_to=object_storage_name, storage=\n default_storage, max_length=500, blank=True)\n contentnode = models.ForeignKey(ContentNode, related_name='files',\n blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n assessment_item = models.ForeignKey(AssessmentItem, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n slideshow_slide = models.ForeignKey(SlideshowSlide, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n file_format = models.ForeignKey(FileFormat, related_name='files', blank\n =True, null=True, db_index=True, on_delete=models.SET_NULL)\n preset = models.ForeignKey(FormatPreset, related_name='files', blank=\n True, null=True, db_index=True, on_delete=models.SET_NULL)\n language = models.ForeignKey(Language, related_name='files', 
blank=True,\n null=True, on_delete=models.SET_NULL)\n original_filename = models.CharField(max_length=255, blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n modified = models.DateTimeField(auto_now=True, verbose_name='modified',\n null=True)\n duration = models.IntegerField(blank=True, null=True)\n objects = CustomManager()\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id')) | Q(\n tree_id=OuterRef('assessment_item__contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls.\n _permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(edit=True) | Q(uploaded_by=user,\n contentnode__isnull=True, assessment_item__isnull=True))\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True).filter(Q(main_tree__tree_id=OuterRef(\n 'contentnode__tree_id')) | Q(main_tree__tree_id=OuterRef(\n 'assessment_item__contentnode__tree_id'))).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True) |\n Q(uploaded_by=user, contentnode__isnull=True,\n 
assessment_item__isnull=True))\n\n\n class Admin:\n pass\n\n def __str__(self):\n return '{checksum}{extension}'.format(checksum=self.checksum,\n extension='.' + self.file_format.extension)\n\n def filename(self):\n \"\"\"\n Returns just the filename of the File in storage, without the path\n\n e.g. abcd.mp4\n \"\"\"\n return os.path.basename(self.file_on_disk.name)\n\n def update_contentnode_content_id(self):\n \"\"\"\n If the file is attached to a contentnode and is not a thumbnail\n then update that contentnode's content_id if it's a copied contentnode.\n \"\"\"\n if self.contentnode and self.preset.thumbnail is False:\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n self.modified = timezone.now()\n self.update_contentnode_content_id()\n\n def save(self, set_by_file_on_disk=True, *args, **kwargs):\n \"\"\"\n Overrider the default save method.\n If the file_on_disk FileField gets passed a content copy:\n 1. generate the MD5 from the content copy\n 2. fill the other fields accordingly\n \"\"\"\n from contentcuration.utils.user import calculate_user_storage\n if self.file_format_id:\n if self.file_format_id not in dict(file_formats.choices):\n raise ValidationError('Invalid file_format')\n if set_by_file_on_disk and self.file_on_disk:\n if self.checksum is None or self.checksum == '':\n md5 = hashlib.md5()\n for chunk in self.file_on_disk.chunks():\n md5.update(chunk)\n self.checksum = md5.hexdigest()\n if not self.file_size:\n self.file_size = self.file_on_disk.size\n if not self.file_format_id:\n ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')\n if ext in list(dict(file_formats.choices).keys()):\n self.file_format_id = ext\n else:\n raise ValueError('Files of type `{}` are not supported.'\n .format(ext))\n super(File, self).save(*args, **kwargs)\n if self.uploaded_by_id:\n calculate_user_storage(self.uploaded_by_id)\n\n\n class Meta:\n indexes = [models.Index(fields=['checksum', 'file_size'], name=\n FILE_DISTINCT_INDEX_NAME), 
models.Index(fields=['-modified'],\n name=FILE_MODIFIED_DESC_INDEX_NAME)]\n constraints = [models.CheckConstraint(check=Q(preset__in=\n MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True), name\n =FILE_DURATION_CONSTRAINT)]\n\n\n<mask token>\n\n\nclass PrerequisiteContentRelationship(models.Model):\n \"\"\"\n Predefine the prerequisite relationship between two ContentNode objects.\n \"\"\"\n target_node = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)\n prerequisite = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['target_node', 'prerequisite']\n\n def clean(self, *args, **kwargs):\n if self.target_node == self.prerequisite:\n raise IntegrityError('Cannot self reference as prerequisite.')\n if PrerequisiteContentRelationship.objects.using(self._state.db\n ).filter(target_node=self.prerequisite, prerequisite=self.\n target_node):\n raise IntegrityError(\n 'Note: Prerequisite relationship is directional! 
%s and %s cannot be prerequisite of each other!'\n % (self.target_node, self.prerequisite))\n super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n super(PrerequisiteContentRelationship, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n\nclass RelatedContentRelationship(models.Model):\n \"\"\"\n Predefine the related relationship between two ContentNode objects.\n \"\"\"\n contentnode_1 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_1', on_delete=models.CASCADE)\n contentnode_2 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_2', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['contentnode_1', 'contentnode_2']\n\n def save(self, *args, **kwargs):\n if self.contentnode_1 == self.contentnode_2:\n raise IntegrityError('Cannot self reference as related.')\n if RelatedContentRelationship.objects.using(self._state.db).filter(\n contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1\n ):\n return\n super(RelatedContentRelationship, self).save(*args, **kwargs)\n\n\nclass Invitation(models.Model):\n \"\"\" Invitation to edit channel \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n accepted = models.BooleanField(default=False)\n declined = models.BooleanField(default=False)\n revoked = models.BooleanField(default=False)\n invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n SET_NULL, null=True, related_name='sent_to')\n share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)\n email = models.EmailField(max_length=100, null=True)\n sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\n 'sent_by', null=True, on_delete=models.CASCADE)\n channel = models.ForeignKey('Channel', null=True, related_name=\n 'pending_editors', on_delete=models.CASCADE)\n first_name = models.CharField(max_length=100, blank=True)\n last_name = 
models.CharField(max_length=100, blank=True, null=True)\n\n\n class Meta:\n verbose_name = 'Invitation'\n verbose_name_plural = 'Invitations'\n\n def accept(self):\n user = User.objects.filter(email__iexact=self.email).first()\n if self.channel:\n if self.share_mode == VIEW_ACCESS:\n self.channel.editors.remove(user)\n self.channel.viewers.add(user)\n else:\n self.channel.viewers.remove(user)\n self.channel.editors.add(user)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user)).distinct()\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user) | Q(channel__viewers=user)).distinct()\n\n\nclass Change(models.Model):\n server_rev = models.BigAutoField(primary_key=True)\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,\n blank=True, on_delete=models.SET_NULL, related_name='changes_by_user')\n channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=\n models.CASCADE)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=\n True, on_delete=models.CASCADE, related_name='changes_about_user')\n client_rev = models.IntegerField(null=True, blank=True)\n session = models.ForeignKey(Session, null=True, blank=True, on_delete=\n models.SET_NULL)\n table = models.CharField(max_length=32)\n change_type = models.IntegerField()\n kwargs = JSONField(encoder=JSONEncoder)\n applied = models.BooleanField(default=False)\n errored = models.BooleanField(default=False)\n\n @classmethod\n def _create_from_change(cls, created_by_id=None, channel_id=None,\n user_id=None, session_key=None, applied=False, table=None, rev=None,\n **data):\n change_type = 
data.pop('type')\n if table is None or table not in ALL_TABLES:\n raise TypeError(\n 'table is a required argument for creating changes and must be a valid table name'\n )\n if change_type is None or change_type not in ALL_CHANGES:\n raise TypeError(\n 'change_type is a required argument for creating changes and must be a valid change type integer'\n )\n return cls(session_id=session_key, created_by_id=created_by_id,\n channel_id=channel_id, user_id=user_id, client_rev=rev, table=\n table, change_type=change_type, kwargs=data, applied=applied)\n\n @classmethod\n def create_changes(cls, changes, created_by_id=None, session_key=None,\n applied=False):\n change_models = []\n for change in changes:\n change_models.append(cls._create_from_change(created_by_id=\n created_by_id, session_key=session_key, applied=applied, **\n change))\n cls.objects.bulk_create(change_models)\n return change_models\n\n @classmethod\n def create_change(cls, change, created_by_id=None, session_key=None,\n applied=False):\n obj = cls._create_from_change(created_by_id=created_by_id,\n session_key=session_key, applied=applied, **change)\n obj.save()\n return obj\n\n @classmethod\n def serialize(cls, change):\n datum = get_attribute(change, ['kwargs']).copy()\n datum.update({'server_rev': get_attribute(change, ['server_rev']),\n 'table': get_attribute(change, ['table']), 'type':\n get_attribute(change, ['change_type']), 'channel_id':\n get_attribute(change, ['channel_id']), 'user_id': get_attribute\n (change, ['user_id']), 'created_by_id': get_attribute(change, [\n 'created_by_id'])})\n return datum\n\n def serialize_to_change_dict(self):\n return self.serialize(self)\n\n\nclass TaskResultCustom(object):\n \"\"\"\n Custom fields to add to django_celery_results's TaskResult model\n\n If adding fields to this class, run `makemigrations` then move the generated migration from the\n `django_celery_results` app to the `contentcuration` app and override the constructor to change\n the app_label. 
See `0141_add_task_signature` for an example\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='tasks',\n on_delete=models.CASCADE, null=True)\n channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)\n progress = models.IntegerField(null=True, blank=True, validators=[\n MinValueValidator(0), MaxValueValidator(100)])\n signature = models.CharField(null=True, blank=False, max_length=32)\n super_as_dict = TaskResult.as_dict\n\n def as_dict(self):\n \"\"\"\n :return: A dictionary representation\n \"\"\"\n super_dict = self.super_as_dict()\n super_dict.update(user_id=self.user_id, channel_id=self.channel_id,\n progress=self.progress)\n return super_dict\n\n @classmethod\n def contribute_to_class(cls, model_class=TaskResult):\n \"\"\"\n Adds fields to model, by default TaskResult\n :param model_class: TaskResult model\n \"\"\"\n for field in dir(cls):\n if not field.startswith('_') and field not in (\n 'contribute_to_class', 'Meta'):\n model_class.add_to_class(field, getattr(cls, field))\n setattr(model_class._meta, 'indexes', getattr(model_class._meta,\n 'indexes', []) + cls.Meta.indexes)\n\n\n class Meta:\n indexes = [models.Index(fields=['signature'], name=\n 'task_result_signature_idx', condition=Q(status__in=\n celery_states.UNREADY_STATES))]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass UUIDField(models.CharField):\n\n def __init__(self, *args, **kwargs):\n kwargs['max_length'] = 32\n super(UUIDField, self).__init__(*args, **kwargs)\n <mask token>\n\n def get_default(self):\n result = super(UUIDField, self).get_default()\n if isinstance(result, uuid.UUID):\n result = result.hex\n return result\n <mask token>\n\n\nclass MPTTTreeIDManager(models.Model):\n \"\"\"\n Because MPTT uses plain integers for tree IDs and does not use an auto-incrementing field for them,\n the same ID can sometimes be assigned to two trees if two channel create ops happen concurrently.\n\n As we are using this table only for the ID generation, it does not need any fields.\n\n We resolve this by creating a dummy table and using its ID as the tree index to take advantage of the db's\n concurrency-friendly way of generating sequential integer IDs. There is a custom migration that ensures\n that the number of records (and thus id) matches the max tree ID number when this table gets added.\n \"\"\"\n\n\n<mask token>\n\n\nclass FileOnDiskStorage(FileSystemStorage):\n \"\"\"\n Overrider FileSystemStorage's default save method to ignore duplicated file.\n \"\"\"\n\n def get_available_name(self, name):\n return name\n\n def _save(self, name, content):\n if self.exists(name):\n logging.warn('Content copy \"%s\" already exists!' % name)\n return name\n return super(FileOnDiskStorage, self)._save(name, content)\n\n\nclass SecretToken(models.Model):\n \"\"\"Tokens for channels\"\"\"\n token = models.CharField(max_length=100, unique=True)\n is_primary = models.BooleanField(default=False)\n\n @classmethod\n def exists(cls, token):\n \"\"\"\n Return true when the token string given by string already exists.\n Returns false otherwise.\n \"\"\"\n return cls.objects.filter(token=token).exists()\n\n @classmethod\n def generate_new_token(cls):\n \"\"\"\n Creates a primary secret token for the current channel using a proquint\n string. 
Creates a secondary token containing the channel id.\n\n These tokens can be used to refer to the channel to download its content\n database.\n \"\"\"\n token = proquint.generate()\n TRIALS = 100\n for __ in range(TRIALS):\n token = proquint.generate()\n if SecretToken.exists(token):\n continue\n break\n else:\n raise ValueError('Cannot generate new token')\n return token\n\n def __str__(self):\n return '{}-{}'.format(self.token[:5], self.token[5:])\n\n\n<mask token>\n\n\nclass PermissionCTE(With):\n tree_id_fields = ['channel__{}__tree_id'.format(tree_name) for\n tree_name in CHANNEL_TREES]\n\n def __init__(self, model, user_id, **kwargs):\n queryset = model.objects.filter(user_id=user_id).annotate(tree_id=\n Unnest(ArrayRemove(Array(*self.tree_id_fields), None),\n output_field=models.IntegerField()))\n super(PermissionCTE, self).__init__(queryset=queryset.values(\n 'user_id', 'channel_id', 'tree_id'), **kwargs)\n\n @classmethod\n def editable_channels(cls, user_id):\n return PermissionCTE(User.editable_channels.through, user_id, name=\n 'editable_channels_cte')\n\n @classmethod\n def view_only_channels(cls, user_id):\n return PermissionCTE(User.view_only_channels.through, user_id, name\n ='view_only_channels_cte')\n\n def exists(self, *filters):\n return Exists(self.queryset().filter(*filters).values('user_id'))\n\n\nclass Channel(models.Model):\n \"\"\" Permissions come from association with organizations \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n name = models.CharField(max_length=200, blank=True)\n description = models.CharField(max_length=400, blank=True)\n tagline = models.CharField(max_length=150, blank=True, null=True)\n version = models.IntegerField(default=0)\n thumbnail = models.TextField(blank=True, null=True)\n thumbnail_encoding = JSONField(default=dict)\n editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name\n ='editable_channels', verbose_name='editors', help_text=\n 'Users with edit rights', blank=True)\n 
viewers = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name\n ='view_only_channels', verbose_name='viewers', help_text=\n 'Users with view only rights', blank=True)\n language = models.ForeignKey('Language', null=True, blank=True,\n related_name='channel_language', on_delete=models.SET_NULL)\n trash_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_trash', on_delete=models.SET_NULL)\n clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_clipboard', on_delete=models.SET_NULL)\n main_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_main', on_delete=models.SET_NULL)\n staging_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_staging', on_delete=models.SET_NULL)\n chef_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_chef', on_delete=models.SET_NULL)\n previous_tree = models.ForeignKey('ContentNode', null=True, blank=True,\n related_name='channel_previous', on_delete=models.SET_NULL)\n bookmarked_by = models.ManyToManyField(settings.AUTH_USER_MODEL,\n related_name='bookmarked_channels', verbose_name='bookmarked by')\n deleted = models.BooleanField(default=False, db_index=True)\n public = models.BooleanField(default=False, db_index=True)\n preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)\n content_defaults = JSONField(default=dict)\n priority = models.IntegerField(default=0, help_text=\n 'Order to display public channels')\n last_published = models.DateTimeField(blank=True, null=True)\n secret_tokens = models.ManyToManyField(SecretToken, related_name=\n 'channels', verbose_name='secret tokens', blank=True)\n source_url = models.CharField(max_length=200, blank=True, null=True)\n demo_server_url = models.CharField(max_length=200, blank=True, null=True)\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = 
models.CharField(max_length=300, blank=True, null=True)\n ricecooker_version = models.CharField(max_length=100, blank=True, null=True\n )\n published_data = JSONField(default=dict)\n icon_encoding = models.TextField(blank=True, null=True)\n total_resource_count = models.IntegerField(default=0)\n published_kind_count = models.TextField(blank=True, null=True)\n published_size = models.FloatField(default=0)\n included_languages = models.ManyToManyField('Language', related_name=\n 'channels', verbose_name='languages', blank=True)\n _field_updates = FieldTracker(fields=['description', 'language_id',\n 'thumbnail', 'name', 'thumbnail_encoding', 'deleted', 'public',\n 'main_tree_id', 'version'])\n\n @classmethod\n def get_editable(cls, user, channel_id):\n return cls.filter_edit_queryset(cls.objects.all(), user).get(id=\n channel_id)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit = Exists(User.editable_channels.through.objects.filter(user_id\n =user_id, channel_id=OuterRef('id')))\n queryset = queryset.annotate(edit=edit)\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n user_email = not user.is_anonymous and user.email\n if user_id:\n filters = dict(user_id=user_id, channel_id=OuterRef('id'))\n edit = Exists(User.editable_channels.through.objects.filter(**\n filters).values('user_id'))\n view = Exists(User.view_only_channels.through.objects.filter(**\n filters).values('user_id'))\n else:\n edit = boolean_val(False)\n view = boolean_val(False)\n queryset = queryset.annotate(edit=edit, view=view)\n if user_id and user.is_admin:\n return queryset\n permission_filter = Q()\n if user_id:\n pending_channels = Invitation.objects.filter(email=user_email,\n revoked=False, declined=False, accepted=False).values_list(\n 'channel_id', 
flat=True)\n permission_filter = Q(view=True) | Q(edit=True) | Q(deleted=\n False, id__in=pending_channels)\n return queryset.filter(permission_filter | Q(deleted=False, public=\n True))\n\n @classmethod\n def get_all_channels(cls):\n return cls.objects.select_related('main_tree').prefetch_related(\n 'editors', 'viewers').distinct()\n\n def resource_size_key(self):\n return '{}_resource_size'.format(self.pk)\n\n def get_resource_size(self):\n cached_data = cache.get(self.resource_size_key())\n if cached_data:\n return cached_data\n tree_id = self.main_tree.tree_id\n files = File.objects.select_related('contentnode', 'assessment_item'\n ).filter(contentnode__tree_id=tree_id).values('checksum',\n 'file_size').distinct().aggregate(resource_size=Sum('file_size'))\n cache.set(self.resource_size_key(), files['resource_size'] or 0, None)\n return files['resource_size'] or 0\n\n def on_create(self):\n record_channel_stats(self, None)\n if not self.content_defaults:\n self.content_defaults = DEFAULT_CONTENT_DEFAULTS\n if not self.main_tree:\n self.main_tree = ContentNode.objects.create(title=self.name,\n kind_id=content_kinds.TOPIC, content_id=self.id, node_id=\n self.id, original_channel_id=self.id, source_channel_id=\n self.id, changed=True, complete=True)\n if settings.DEBUG:\n if ContentNode.objects.filter(parent=None, tree_id=self.\n main_tree.tree_id).count() != 1:\n raise AssertionError\n if not self.trash_tree:\n self.trash_tree = ContentNode.objects.create(title=self.name,\n kind_id=content_kinds.TOPIC, content_id=self.id, node_id=\n self.id)\n if self.public and (self.main_tree and self.main_tree.published):\n delete_public_channel_cache_keys()\n\n def on_update(self):\n from contentcuration.utils.user import calculate_user_storage\n original_values = self._field_updates.changed()\n record_channel_stats(self, original_values)\n blacklist = set(['public', 'main_tree_id', 'version'])\n if self.main_tree and original_values and any(True for field in\n original_values 
if field not in blacklist):\n self.main_tree.changed = True\n if 'thumbnail' in original_values and original_values['thumbnail'\n ] and 'static' not in original_values['thumbnail']:\n filename, ext = os.path.splitext(original_values['thumbnail'])\n delete_empty_file_reference(filename, ext[1:])\n if 'deleted' in original_values:\n for editor in self.editors.all():\n calculate_user_storage(editor.pk)\n if 'deleted' in original_values and not original_values['deleted']:\n self.pending_editors.all().delete()\n export_db_storage_path = os.path.join(settings.DB_ROOT,\n '{channel_id}.sqlite3'.format(channel_id=self.id))\n if default_storage.exists(export_db_storage_path):\n default_storage.delete(export_db_storage_path)\n if self.main_tree:\n self.main_tree.published = False\n if self.main_tree and self.main_tree._field_updates.changed():\n self.main_tree.save()\n if 'public' in original_values and (self.main_tree and self.\n main_tree.published):\n delete_public_channel_cache_keys()\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n super(Channel, self).save(*args, **kwargs)\n\n def get_thumbnail(self):\n return get_channel_thumbnail(self)\n\n def has_changes(self):\n return self.main_tree.get_descendants(include_self=True).filter(changed\n =True).exists()\n\n def get_date_modified(self):\n return self.main_tree.get_descendants(include_self=True).aggregate(\n last_modified=Max('modified'))['last_modified']\n\n def get_resource_count(self):\n return self.main_tree.get_descendants().exclude(kind_id=\n content_kinds.TOPIC).order_by('content_id').distinct('content_id'\n ).count()\n\n def get_human_token(self):\n return self.secret_tokens.get(is_primary=True)\n\n def get_channel_id_token(self):\n return self.secret_tokens.get(token=self.id)\n\n def make_token(self):\n token = self.secret_tokens.create(token=SecretToken.\n generate_new_token(), is_primary=True)\n self.secret_tokens.get_or_create(token=self.id)\n 
return token\n\n def make_public(self, bypass_signals=False):\n \"\"\"\n Sets the current channel object to be public and viewable by anyone.\n\n If bypass_signals is True, update the model in such a way that we\n prevent any model signals from running due to the update.\n\n Returns the same channel object.\n \"\"\"\n if bypass_signals:\n self.public = True\n Channel.objects.filter(id=self.id).update(public=True)\n delete_public_channel_cache_keys()\n else:\n self.public = True\n self.save()\n return self\n\n def mark_created(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n CREATION)\n\n def mark_publishing(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n PUBLICATION)\n self.main_tree.publishing = True\n self.main_tree.save()\n\n def mark_deleted(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n DELETION)\n self.deleted = True\n self.save()\n\n def mark_recovered(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.\n RECOVERY)\n self.deleted = False\n self.save()\n\n @property\n def deletion_history(self):\n return self.history.filter(action=channel_history.DELETION)\n\n @property\n def publishing_history(self):\n return self.history.filter(action=channel_history.PUBLICATION)\n\n @classmethod\n def get_public_channels(cls, defer_nonmain_trees=False):\n \"\"\"\n Get all public channels.\n\n If defer_nonmain_trees is True, defer the loading of all\n trees except for the main_tree.\"\"\"\n if defer_nonmain_trees:\n c = Channel.objects.filter(public=True).exclude(deleted=True\n ).select_related('main_tree').prefetch_related('editors'\n ).defer('trash_tree', 'clipboard_tree', 'staging_tree',\n 'chef_tree', 'previous_tree', 'viewers')\n else:\n c = Channel.objects.filter(public=True).exclude(deleted=True)\n return c\n\n\n class Meta:\n verbose_name = 'Channel'\n verbose_name_plural = 'Channels'\n indexes = [models.Index(fields=['name'], 
name=CHANNEL_NAME_INDEX_NAME)]\n index_together = [['deleted', 'public']]\n\n\n<mask token>\n\n\nclass ChannelHistory(models.Model):\n \"\"\"\n Model for tracking certain actions performed on a channel\n \"\"\"\n channel = models.ForeignKey('Channel', null=False, blank=False,\n related_name='history', on_delete=models.CASCADE)\n actor = models.ForeignKey('User', null=False, blank=False, related_name\n ='channel_history', on_delete=models.CASCADE)\n performed = models.DateTimeField(default=timezone.now)\n action = models.CharField(max_length=50, choices=channel_history.choices)\n\n @classmethod\n def prune(cls):\n \"\"\"\n Prunes history records by keeping the most recent actions for each channel and type,\n and deleting all other older actions\n \"\"\"\n keep_ids = cls.objects.distinct('channel_id', 'action').order_by(\n 'channel_id', 'action', '-performed').values_list('id', flat=True)\n cls.objects.exclude(id__in=keep_ids).delete()\n\n\n class Meta:\n verbose_name = 'Channel history'\n verbose_name_plural = 'Channel histories'\n indexes = [models.Index(fields=['channel_id'], name=\n CHANNEL_HISTORY_CHANNEL_INDEX_NAME)]\n\n\nclass UserHistory(models.Model):\n \"\"\"\n Model that stores the user's action history.\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=\n False, related_name='history', on_delete=models.CASCADE)\n action = models.CharField(max_length=32, choices=user_history.choices)\n performed_at = models.DateTimeField(default=timezone.now)\n\n\nclass ChannelSet(models.Model):\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n name = models.CharField(max_length=200, blank=True)\n description = models.CharField(max_length=400, blank=True)\n public = models.BooleanField(default=False, db_index=True)\n editors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name\n ='channel_sets', verbose_name='editors', help_text=\n 'Users with edit rights', blank=True)\n secret_token = models.ForeignKey('SecretToken', 
null=True, blank=True,\n related_name='channel_sets', on_delete=models.SET_NULL)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n user_id = not user.is_anonymous and user.id\n edit = Exists(User.channel_sets.through.objects.filter(user_id=\n user_id, channelset_id=OuterRef('id')))\n queryset = queryset.annotate(edit=edit)\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n return cls.filter_edit_queryset(queryset, user)\n\n def get_channels(self):\n if self.secret_token:\n return self.secret_token.channels.filter(deleted=False)\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n super(ChannelSet, self).save()\n\n def on_create(self):\n if not self.secret_token:\n self.secret_token = SecretToken.objects.create(token=\n SecretToken.generate_new_token())\n\n def delete(self, *args, **kwargs):\n super(ChannelSet, self).delete(*args, **kwargs)\n if self.secret_token:\n self.secret_token.delete()\n\n\nclass ContentTag(models.Model):\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n tag_name = models.CharField(max_length=50)\n channel = models.ForeignKey('Channel', related_name='tags', blank=True,\n null=True, db_index=True, on_delete=models.SET_NULL)\n objects = CustomManager()\n\n def __str__(self):\n return self.tag_name\n\n\n class Meta:\n unique_together = ['tag_name', 'channel']\n\n\nclass License(models.Model):\n \"\"\"\n Normalize the license of ContentNode model\n \"\"\"\n license_name = models.CharField(max_length=50)\n license_url = models.URLField(blank=True)\n license_description = models.TextField(blank=True)\n copyright_holder_required = models.BooleanField(default=True)\n is_custom = models.BooleanField(default=False)\n exists = models.BooleanField(default=False, verbose_name=\n 'license exists', help_text=\n 'Tells whether or not a content item is licensed to 
share')\n\n @classmethod\n def validate_name(cls, name):\n if cls.objects.filter(license_name=name).count() == 0:\n raise ValidationError('License `{}` does not exist'.format(name))\n\n def __str__(self):\n return self.license_name\n\n\n<mask token>\n\n\nclass ContentNode(MPTTModel, models.Model):\n \"\"\"\n By default, all nodes have a title and can be used as a topic.\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=\n False, db_index=True)\n node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)\n original_channel_id = UUIDField(primary_key=False, editable=False, null\n =True, db_index=True)\n source_channel_id = UUIDField(primary_key=False, editable=False, null=True)\n original_source_node_id = UUIDField(primary_key=False, editable=False,\n null=True, db_index=True)\n source_node_id = UUIDField(primary_key=False, editable=False, null=True)\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n title = models.CharField(max_length=200, blank=True)\n description = models.TextField(blank=True)\n kind = models.ForeignKey('ContentKind', related_name='contentnodes',\n db_index=True, null=True, blank=True, on_delete=models.SET_NULL)\n license = models.ForeignKey('License', null=True, blank=True, on_delete\n =models.SET_NULL)\n license_description = models.CharField(max_length=400, null=True, blank\n =True)\n prerequisite = models.ManyToManyField('self', related_name=\n 'is_prerequisite_of', through='PrerequisiteContentRelationship',\n symmetrical=False, blank=True)\n is_related = models.ManyToManyField('self', related_name='relate_to',\n through='RelatedContentRelationship', symmetrical=False, blank=True)\n language = models.ForeignKey('Language', null=True, blank=True,\n related_name='content_language', on_delete=models.SET_NULL)\n parent = TreeForeignKey('self', 
null=True, blank=True, related_name=\n 'children', db_index=True, on_delete=models.CASCADE)\n tags = models.ManyToManyField(ContentTag, symmetrical=False,\n related_name='tagged_content', blank=True)\n sort_order = models.FloatField(max_length=50, default=1, verbose_name=\n 'sort order', help_text='Ascending, lowest number shown first')\n copyright_holder = models.CharField(max_length=200, null=True, blank=\n True, default='', help_text=\n 'Organization of person who holds the essential rights')\n original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=\n True, blank=True, related_name='duplicates')\n cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=\n True, blank=True, related_name='clones')\n thumbnail_encoding = models.TextField(blank=True, null=True)\n created = models.DateTimeField(default=timezone.now, verbose_name='created'\n )\n modified = models.DateTimeField(auto_now=True, verbose_name='modified')\n published = models.BooleanField(default=False)\n publishing = models.BooleanField(default=False)\n complete = models.BooleanField(null=True)\n changed = models.BooleanField(default=True)\n \"\"\"\n Extra fields for exercises:\n - type: mastery model to use to determine completion\n - m: m value for M out of N mastery criteria\n - n: n value for M out of N mastery criteria\n \"\"\"\n extra_fields = JSONField(default=dict, blank=True, null=True)\n author = models.CharField(max_length=200, blank=True, default='',\n help_text='Who created this content?', null=True)\n aggregator = models.CharField(max_length=200, blank=True, default='',\n help_text='Who gathered this content together?', null=True)\n provider = models.CharField(max_length=200, blank=True, default='',\n help_text='Who distributed this content?', null=True)\n role_visibility = models.CharField(max_length=50, choices=roles.choices,\n default=roles.LEARNER)\n freeze_authoring_data = models.BooleanField(default=False)\n grade_levels = models.JSONField(blank=True, 
null=True)\n resource_types = models.JSONField(blank=True, null=True)\n learning_activities = models.JSONField(blank=True, null=True)\n accessibility_labels = models.JSONField(blank=True, null=True)\n categories = models.JSONField(blank=True, null=True)\n learner_needs = models.JSONField(blank=True, null=True)\n suggested_duration = models.IntegerField(blank=True, null=True,\n help_text='Suggested duration for the content node (in seconds)')\n objects = CustomContentNodeTreeManager()\n _field_updates = FieldTracker()\n _permission_filter = Q(tree_id=OuterRef('tree_id'))\n\n @classmethod\n def _annotate_channel_id(cls, queryset):\n return queryset.annotate(channel_id=Subquery(Channel.objects.filter\n (main_tree__tree_id=OuterRef('tree_id')).values_list('id', flat\n =True)[:1]))\n\n @classmethod\n def filter_by_pk(cls, pk):\n \"\"\"\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always\n returns a queryset filtered by pk.\n\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode\n for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. 
If\n a ContentNode does not exist for `pk` then an empty queryset is returned.\n \"\"\"\n query = ContentNode.objects.filter(pk=pk)\n if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:\n tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))\n if tree_id:\n query = query.filter(tree_id=tree_id)\n else:\n tree_id = ContentNode.objects.filter(pk=pk).values_list(\n 'tree_id', flat=True).first()\n if tree_id:\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk),\n tree_id, None)\n query = query.filter(tree_id=tree_id)\n else:\n query = query.none()\n return query\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('tree_id')).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n @raise_if_unsaved\n def get_root(self):\n if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n return self\n return super(ContentNode, self).get_root()\n\n @raise_if_unsaved\n def get_root_id(self):\n if self.is_root_node() and self.kind_id != 
content_kinds.TOPIC:\n return self\n return ContentNode.objects.values_list('pk', flat=True).get(tree_id\n =self._mpttfield('tree_id'), parent=None)\n\n def get_tree_data(self, levels=float('inf')):\n \"\"\"\n Returns `levels`-deep tree information starting at current node.\n Args:\n levels (int): depth of tree hierarchy to return\n Returns:\n tree (dict): starting with self, with children list containing either\n the just the children's `node_id`s or full recusive tree.\n \"\"\"\n if self.kind_id == content_kinds.TOPIC:\n node_data = {'title': self.title, 'kind': self.kind_id,\n 'node_id': self.node_id, 'studio_id': self.id}\n children = self.children.all()\n if levels > 0:\n node_data['children'] = [c.get_tree_data(levels=levels - 1) for\n c in children]\n return node_data\n if self.kind_id == content_kinds.EXERCISE:\n return {'title': self.title, 'kind': self.kind_id, 'count':\n self.assessment_items.count(), 'node_id': self.node_id,\n 'studio_id': self.id}\n return {'title': self.title, 'kind': self.kind_id, 'file_size':\n self.files.values('file_size').aggregate(size=Sum('file_size'))\n ['size'], 'node_id': self.node_id, 'studio_id': self.id}\n\n def get_original_node(self):\n original_node = self.original_node or self\n if self.original_channel_id and self.original_source_node_id:\n original_tree_id = Channel.objects.select_related('main_tree').get(\n pk=self.original_channel_id).main_tree.tree_id\n original_node = ContentNode.objects.filter(tree_id=\n original_tree_id, node_id=self.original_source_node_id).first(\n ) or ContentNode.objects.filter(tree_id=original_tree_id,\n content_id=self.content_id).first() or self\n return original_node\n\n def get_associated_presets(self):\n key = 'associated_presets_{}'.format(self.kind_id)\n cached_data = cache.get(key)\n if cached_data:\n return cached_data\n presets = list(FormatPreset.objects.filter(kind=self.kind).values())\n cache.set(key, presets, None)\n return presets\n\n def get_prerequisites(self):\n 
prerequisite_mapping = {}\n prerequisites = self.prerequisite.all()\n prereqlist = list(prerequisites)\n for prereq in prerequisites:\n prlist, prereqmapping = prereq.get_prerequisites()\n prerequisite_mapping.update({prereq.pk: prereqmapping})\n prereqlist.extend(prlist)\n return prereqlist, prerequisite_mapping\n\n def get_postrequisites(self):\n postrequisite_mapping = {}\n postrequisites = self.is_prerequisite_of.all()\n postreqlist = list(postrequisites)\n for postreq in postrequisites:\n prlist, postreqmapping = postreq.get_postrequisites()\n postrequisite_mapping.update({postreq.pk: postreqmapping})\n postreqlist.extend(prlist)\n return postreqlist, postrequisite_mapping\n\n def get_channel_id(self):\n if hasattr(self, 'channel_id'):\n return self.channel_id\n channel = self.get_channel()\n if channel:\n return channel.id\n return None\n\n def get_channel(self):\n try:\n root = self.get_root()\n if not root:\n return None\n return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=\n root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(\n previous_tree=root)).first()\n except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):\n return None\n\n def get_thumbnail(self):\n if self.thumbnail_encoding:\n thumbnail_data = load_json_string(self.thumbnail_encoding)\n if type(thumbnail_data) is dict and thumbnail_data.get('base64'):\n return thumbnail_data['base64']\n thumbnail = self.files.filter(preset__thumbnail=True).first()\n if thumbnail:\n return generate_storage_url(str(thumbnail))\n return ''\n\n @classmethod\n def get_nodes_with_title(cls, title, limit_to_children_of=None):\n \"\"\"\n Returns all ContentNodes with a given title. 
If limit_to_children_of\n is passed in with an id, only look at all the children of the node with that id.\n \"\"\"\n if limit_to_children_of:\n root = cls.objects.get(id=limit_to_children_of)\n return root.get_descendants().filter(title=title)\n return cls.objects.filter(title=title)\n\n def get_details(self, channel_id=None):\n \"\"\"\n Returns information about the node and its children, including total size, languages, files, etc.\n\n :return: A dictionary with detailed statistics and information about the node.\n \"\"\"\n from contentcuration.viewsets.common import SQArrayAgg\n from contentcuration.viewsets.common import SQCount\n from contentcuration.viewsets.common import SQRelatedArrayAgg\n from contentcuration.viewsets.common import SQSum\n from contentcuration.viewsets.common import SQJSONBKeyArrayAgg\n node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id\n ).order_by()\n descendants = self.get_descendants().values('id')\n if channel_id:\n channel = Channel.objects.filter(id=channel_id)[0]\n else:\n channel = self.get_channel()\n if not descendants.exists():\n data = {'last_update': pytz.utc.localize(datetime.now()).\n strftime(settings.DATE_TIME_FORMAT), 'created': self.\n created.strftime(settings.DATE_TIME_FORMAT),\n 'resource_count': 0, 'resource_size': 0, 'includes': {\n 'coach_content': 0, 'exercises': 0}, 'kind_count': [],\n 'languages': [], 'accessible_languages': [], 'licenses': [],\n 'tags': [], 'copyright_holders': [], 'authors': [],\n 'aggregators': [], 'providers': [], 'sample_pathway': [],\n 'original_channels': [], 'sample_nodes': [], 'levels': [],\n 'categories': []}\n cache.set('details_{}'.format(self.node_id), json.dumps(data), None\n )\n return data\n resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()\n nodes = With(File.objects.filter(contentnode_id__in=Subquery(\n resources.values('id'))).values('checksum', 'file_size').\n order_by(), name='nodes')\n file_query = 
nodes.queryset().with_cte(nodes).values('checksum',\n 'file_size').distinct()\n l_nodes = With(File.objects.filter(contentnode_id__in=Subquery(\n resources.values('id'))).values('language_id', 'preset_id').\n order_by(), name='l_nodes')\n accessible_languages_query = l_nodes.queryset().filter(preset_id=\n format_presets.VIDEO_SUBTITLE).with_cte(l_nodes).values(\n 'language__native_name').distinct()\n tags_query = str(ContentTag.objects.filter(tagged_content__pk__in=\n descendants.values_list('pk', flat=True)).values('tag_name').\n annotate(count=Count('tag_name')).query).replace('topic', \"'topic'\"\n )\n kind_count_query = str(resources.values('kind_id').annotate(count=\n Count('kind_id')).query).replace('topic', \"'topic'\")\n node = node.annotate(resource_count=SQCount(resources, field='id'),\n resource_size=SQSum(file_query, field='file_size'),\n copyright_holders=SQArrayAgg(resources.distinct(\n 'copyright_holder').order_by('copyright_holder'), field=\n 'copyright_holder'), authors=SQArrayAgg(resources.distinct(\n 'author').order_by('author'), field='author'), aggregators=\n SQArrayAgg(resources.distinct('aggregator').order_by(\n 'aggregator'), field='aggregator'), providers=SQArrayAgg(\n resources.distinct('provider').order_by('provider'), field=\n 'provider'), languages=SQRelatedArrayAgg(descendants.exclude(\n language=None).distinct('language__native_name').order_by(),\n field='language__native_name', fieldname='native_name'),\n accessible_languages=SQRelatedArrayAgg(\n accessible_languages_query, field='language__native_name',\n fieldname='native_name'), licenses=SQRelatedArrayAgg(resources.\n exclude(license=None).distinct('license__license_name').\n order_by('license__license_name'), field=\n 'license__license_name', fieldname='license_name'), kind_count=\n RawSQL('SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format\n (kind_count_query), ()), tags_list=RawSQL(\n 'SELECT json_agg(row_to_json (x)) FROM ({}) as x'.format(\n tags_query), ()), 
coach_content=SQCount(resources.filter(\n role_visibility=roles.COACH), field='id'), exercises=SQCount(\n resources.filter(kind_id=content_kinds.EXERCISE), field='id'),\n levels=SQJSONBKeyArrayAgg(descendants.exclude(\n grade_levels__isnull=True), field='grade_levels'),\n all_categories=SQJSONBKeyArrayAgg(descendants.exclude(\n categories__isnull=True), field='categories'))\n max_level = max(resources.values_list('level', flat=True).order_by(\n ).distinct() or [0])\n m_nodes = With(resources.values('id', 'level', 'tree_id', 'lft').\n order_by(), name='m_nodes')\n deepest_node_record = m_nodes.queryset().with_cte(m_nodes).filter(level\n =max_level).values('id').order_by('tree_id', 'lft').first()\n if deepest_node_record:\n deepest_node = ContentNode.objects.get(pk=deepest_node_record['id']\n )\n pathway = list(deepest_node.get_ancestors().order_by().exclude(\n parent=None).values('title', 'node_id', 'kind_id').order_by()\n ) if deepest_node_record else []\n sample_nodes = [{'node_id': n.node_id, 'title': n.title,\n 'description': n.description, 'thumbnail': n.get_thumbnail(),\n 'kind': n.kind_id} for n in deepest_node.get_siblings(\n include_self=True)[0:4]] if deepest_node_record else []\n channel_id = channel and channel.id\n originals = resources.values('original_channel_id').annotate(count=\n Count('original_channel_id')).order_by('original_channel_id')\n originals = {c['original_channel_id']: c['count'] for c in originals}\n original_channels = Channel.objects.exclude(pk=channel_id).filter(\n pk__in=originals.keys(), deleted=False).order_by()\n original_channels = [{'id': c.id, 'name': '{}{}'.format(c.name, _(\n ' (Original)') if channel_id == c.id else ''), 'thumbnail': c.\n get_thumbnail(), 'count': originals[c.id]} for c in\n original_channels]\n node = node.order_by().values('id', 'resource_count',\n 'resource_size', 'copyright_holders', 'authors', 'aggregators',\n 'providers', 'languages', 'accessible_languages',\n 'coach_content', 'licenses', 'tags_list', 
'kind_count',\n 'exercises', 'levels', 'all_categories').first()\n for_educators = {'coach_content': node['coach_content'],\n 'exercises': node['exercises']}\n data = {'last_update': pytz.utc.localize(datetime.now()).strftime(\n settings.DATE_TIME_FORMAT), 'created': self.created.strftime(\n settings.DATE_TIME_FORMAT), 'resource_count': node.get(\n 'resource_count', 0), 'resource_size': node.get('resource_size',\n 0), 'includes': for_educators, 'kind_count': node.get(\n 'kind_count') or [], 'languages': node.get('languages') or [],\n 'accessible_languages': node.get('accessible_languages') or [],\n 'licenses': node.get('licenses') or [], 'tags': node.get(\n 'tags_list') or [], 'original_channels': original_channels,\n 'sample_pathway': pathway, 'sample_nodes': sample_nodes,\n 'authors': list(filter(bool, node['authors'])), 'aggregators':\n list(filter(bool, node['aggregators'])), 'providers': list(\n filter(bool, node['providers'])), 'copyright_holders': list(\n filter(bool, node['copyright_holders'])), 'levels': node.get(\n 'levels') or [], 'categories': node.get('all_categories') or []}\n cache.set('details_{}'.format(self.node_id), json.dumps(data), None)\n return data\n\n def has_changes(self):\n mptt_opts = self._mptt_meta\n blacklist = set(['changed', 'modified', 'publishing', mptt_opts.\n tree_id_attr, mptt_opts.left_attr, mptt_opts.right_attr,\n mptt_opts.level_attr])\n original_values = self._field_updates.changed()\n return any(True for field in original_values if field not in blacklist)\n\n def recalculate_editors_storage(self):\n from contentcuration.utils.user import calculate_user_storage\n for editor in self.files.values_list('uploaded_by_id', flat=True\n ).distinct():\n calculate_user_storage(editor)\n\n def mark_complete(self):\n errors = []\n if not (bool(self.title) or self.parent_id is None):\n errors.append('Empty title')\n if self.kind_id != content_kinds.TOPIC:\n if not self.license:\n errors.append('Missing license')\n if (self.license and 
self.license.is_custom and not self.\n license_description):\n errors.append('Missing license description for custom license')\n if (self.license and self.license.copyright_holder_required and\n not self.copyright_holder):\n errors.append('Missing required copyright holder')\n if (self.kind_id != content_kinds.EXERCISE and not self.files.\n filter(preset__supplementary=False).exists()):\n errors.append('Missing default file')\n if self.kind_id == content_kinds.EXERCISE:\n if not self.assessment_items.filter(~Q(raw_data='') | ~Q(\n question='') & ~Q(answers='[]') & (Q(type=exercises.\n INPUT_QUESTION) | Q(answers__iregex='\"correct\":\\\\s*true'))\n ).exists():\n errors.append(\n 'No questions with question text and complete answers')\n criterion = self.extra_fields.get('options', {}).get(\n 'completion_criteria')\n if not (self.extra_fields.get('mastery_model') or criterion):\n errors.append('Missing mastery criterion')\n if criterion:\n try:\n completion_criteria.validate(criterion, kind=\n content_kinds.EXERCISE)\n except completion_criteria.ValidationError:\n errors.append(\n 'Mastery criterion is defined but is invalid')\n self.complete = not errors\n return errors\n\n def make_content_id_unique(self):\n \"\"\"\n If self is NOT an original contentnode (in other words, a copied contentnode)\n and a contentnode with same content_id exists then we update self's content_id.\n \"\"\"\n is_node_original = (self.original_source_node_id is None or self.\n original_source_node_id == self.node_id)\n node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(\n content_id=self.content_id)\n if not is_node_original and node_same_content_id.exists():\n ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.\n uuid4().hex)\n\n def on_create(self):\n self.changed = True\n self.recalculate_editors_storage()\n self.set_default_learning_activity()\n\n def on_update(self):\n self.changed = self.changed or self.has_changes()\n\n def move_to(self, target, *args, 
**kwargs):\n parent_was_trashtree = self.parent.channel_trash.exists()\n super(ContentNode, self).move_to(target, *args, **kwargs)\n self.save()\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.\n tree_id, None)\n if target.channel_trash.exists() or parent_was_trashtree:\n self.recalculate_editors_storage()\n\n def set_default_learning_activity(self):\n if self.learning_activities is None:\n if self.kind in kind_activity_map:\n self.learning_activities = {kind_activity_map[self.kind]: True}\n\n def save(self, skip_lock=False, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n old_parent_id = self._field_updates.changed().get('parent_id')\n if self._state.adding and (self.parent_id or self.parent):\n same_order = False\n elif old_parent_id is DeferredAttribute:\n same_order = True\n else:\n same_order = old_parent_id == self.parent_id\n if not same_order:\n changed_ids = list(filter(lambda x: x is not None, set([\n old_parent_id, self.parent_id])))\n else:\n changed_ids = []\n if not same_order and not skip_lock:\n with ContentNode.objects.lock_mptt(*ContentNode.objects.filter(\n id__in=[pid for pid in [old_parent_id, self.parent_id] if\n pid]).values_list('tree_id', flat=True).distinct()):\n super(ContentNode, self).save(*args, **kwargs)\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(\n changed=True)\n else:\n super(ContentNode, self).save(*args, **kwargs)\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(changed\n =True)\n save.alters_data = True\n\n def delete(self, *args, **kwargs):\n parent = self.parent or self._field_updates.changed().get('parent')\n if parent:\n parent.changed = True\n parent.save()\n self.recalculate_editors_storage()\n with ContentNode.objects.lock_mptt(self.tree_id):\n return super(ContentNode, self).delete(*args, **kwargs)\n delete.alters_data = True\n\n def copy_to(self, target=None, position='last-child', pk=None, mods=\n None, 
excluded_descendants=None, can_edit_source_channel=None,\n batch_size=None, progress_tracker=None):\n return self._tree_manager.copy_node(self, target, position, pk,\n mods, excluded_descendants, can_edit_source_channel, batch_size,\n progress_tracker)[0]\n\n def copy(self):\n return self.copy_to()\n\n def is_publishable(self):\n return self.complete and self.get_descendants(include_self=True\n ).exclude(kind_id=content_kinds.TOPIC).exists()\n\n\n class Meta:\n verbose_name = 'Topic'\n verbose_name_plural = 'Topics'\n indexes = [models.Index(fields=['node_id'], name=NODE_ID_INDEX_NAME\n ), models.Index(fields=['-modified'], name=\n NODE_MODIFIED_DESC_INDEX_NAME)]\n\n\nclass ContentKind(models.Model):\n kind = models.CharField(primary_key=True, max_length=200, choices=\n content_kinds.choices)\n\n def __str__(self):\n return self.kind\n\n\nclass FileFormat(models.Model):\n extension = models.CharField(primary_key=True, max_length=40, choices=\n file_formats.choices)\n mimetype = models.CharField(max_length=200, blank=True)\n\n def __str__(self):\n return self.extension\n\n\nclass FormatPreset(models.Model):\n id = models.CharField(primary_key=True, max_length=150, choices=\n format_presets.choices)\n readable_name = models.CharField(max_length=400)\n multi_language = models.BooleanField(default=False)\n supplementary = models.BooleanField(default=False)\n thumbnail = models.BooleanField(default=False)\n subtitle = models.BooleanField(default=False)\n display = models.BooleanField(default=True)\n order = models.IntegerField(default=0)\n kind = models.ForeignKey(ContentKind, related_name='format_presets',\n null=True, on_delete=models.SET_NULL)\n allowed_formats = models.ManyToManyField(FileFormat, blank=True)\n\n def __str__(self):\n return self.id\n\n @classmethod\n def guess_format_preset(cls, filename):\n \"\"\"\n Guess the format preset of a filename based on its extension.\n\n Return None if format is unknown.\n \"\"\"\n _, ext = os.path.splitext(filename)\n ext 
= ext.lstrip('.')\n f = FormatPreset.objects.filter(allowed_formats__extension=ext,\n display=True)\n return f.first()\n\n @classmethod\n def get_preset(cls, preset_name):\n \"\"\"\n Get the FormatPreset object with that exact name.\n\n Returns None if that format preset is not found.\n \"\"\"\n try:\n return FormatPreset.objects.get(id=preset_name)\n except FormatPreset.DoesNotExist:\n return None\n\n\nclass Language(models.Model):\n id = models.CharField(max_length=14, primary_key=True)\n lang_code = models.CharField(max_length=3, db_index=True)\n lang_subcode = models.CharField(max_length=10, db_index=True, blank=\n True, null=True)\n readable_name = models.CharField(max_length=100, blank=True)\n native_name = models.CharField(max_length=100, blank=True)\n lang_direction = models.CharField(max_length=3, choices=languages.\n LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])\n\n def ietf_name(self):\n return '{code}-{subcode}'.format(code=self.lang_code, subcode=self.\n lang_subcode) if self.lang_subcode else self.lang_code\n\n def __str__(self):\n return self.ietf_name()\n\n\n<mask token>\n\n\nclass AssessmentItem(models.Model):\n type = models.CharField(max_length=50, default='multiplechoice')\n question = models.TextField(blank=True)\n hints = models.TextField(default='[]')\n answers = models.TextField(default='[]')\n order = models.IntegerField(default=1)\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'assessment_items', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n assessment_id = UUIDField(primary_key=False, default=uuid.uuid4,\n editable=False)\n raw_data = models.TextField(blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n randomize = models.BooleanField(default=False)\n deleted = models.BooleanField(default=False)\n objects = CustomManager()\n _field_updates = FieldTracker()\n\n def has_changes(self):\n return bool(self._field_updates.changed())\n\n\n class Meta:\n 
indexes = [models.Index(fields=['assessment_id'], name=\n ASSESSMENT_ID_INDEX_NAME)]\n unique_together = ['contentnode', 'assessment_id']\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n edit_cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(edit_cte).annotate(edit=edit_cte.\n exists(cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef('contentnode__tree_id'\n )).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n def on_create(self):\n \"\"\"\n When an exercise is added to a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n \"\"\"\n When an exercise is updated of a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n When an exercise is deleted from a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n return super(AssessmentItem, self).delete(*args, 
**kwargs)\n\n\nclass SlideshowSlide(models.Model):\n contentnode = models.ForeignKey('ContentNode', related_name=\n 'slideshow_slides', blank=True, null=True, db_index=True, on_delete\n =models.CASCADE)\n sort_order = models.FloatField(default=1.0)\n metadata = JSONField(default=dict)\n\n\nclass StagedFile(models.Model):\n \"\"\"\n Keeps track of files uploaded through Ricecooker to avoid user going over disk quota limit\n \"\"\"\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='staged_files',\n blank=True, null=True, on_delete=models.CASCADE)\n\n\n<mask token>\n\n\nclass File(models.Model):\n \"\"\"\n The bottom layer of the contentDB schema, defines the basic building brick for content.\n Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...\n \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n file_on_disk = models.FileField(upload_to=object_storage_name, storage=\n default_storage, max_length=500, blank=True)\n contentnode = models.ForeignKey(ContentNode, related_name='files',\n blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n assessment_item = models.ForeignKey(AssessmentItem, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n slideshow_slide = models.ForeignKey(SlideshowSlide, related_name=\n 'files', blank=True, null=True, db_index=True, on_delete=models.CASCADE\n )\n file_format = models.ForeignKey(FileFormat, related_name='files', blank\n =True, null=True, db_index=True, on_delete=models.SET_NULL)\n preset = models.ForeignKey(FormatPreset, related_name='files', blank=\n True, null=True, db_index=True, on_delete=models.SET_NULL)\n language = models.ForeignKey(Language, related_name='files', 
blank=True,\n null=True, on_delete=models.SET_NULL)\n original_filename = models.CharField(max_length=255, blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='files', blank=True,\n null=True, on_delete=models.SET_NULL)\n modified = models.DateTimeField(auto_now=True, verbose_name='modified',\n null=True)\n duration = models.IntegerField(blank=True, null=True)\n objects = CustomManager()\n _permission_filter = Q(tree_id=OuterRef('contentnode__tree_id')) | Q(\n tree_id=OuterRef('assessment_item__contentnode__tree_id'))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n if not user_id:\n return queryset.none()\n cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls.\n _permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(edit=True) | Q(uploaded_by=user,\n contentnode__isnull=True, assessment_item__isnull=True))\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n queryset = queryset.annotate(public=Exists(Channel.objects.filter(\n public=True).filter(Q(main_tree__tree_id=OuterRef(\n 'contentnode__tree_id')) | Q(main_tree__tree_id=OuterRef(\n 'assessment_item__contentnode__tree_id'))).values('pk')))\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=\n boolean_val(False)).filter(public=True)\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(edit\n =edit_cte.exists(cls._permission_filter), view=view_cte.exists(\n cls._permission_filter))\n if user.is_admin:\n return queryset\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True) |\n Q(uploaded_by=user, contentnode__isnull=True,\n 
assessment_item__isnull=True))\n\n\n class Admin:\n pass\n\n def __str__(self):\n return '{checksum}{extension}'.format(checksum=self.checksum,\n extension='.' + self.file_format.extension)\n\n def filename(self):\n \"\"\"\n Returns just the filename of the File in storage, without the path\n\n e.g. abcd.mp4\n \"\"\"\n return os.path.basename(self.file_on_disk.name)\n\n def update_contentnode_content_id(self):\n \"\"\"\n If the file is attached to a contentnode and is not a thumbnail\n then update that contentnode's content_id if it's a copied contentnode.\n \"\"\"\n if self.contentnode and self.preset.thumbnail is False:\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n self.modified = timezone.now()\n self.update_contentnode_content_id()\n\n def save(self, set_by_file_on_disk=True, *args, **kwargs):\n \"\"\"\n Overrider the default save method.\n If the file_on_disk FileField gets passed a content copy:\n 1. generate the MD5 from the content copy\n 2. fill the other fields accordingly\n \"\"\"\n from contentcuration.utils.user import calculate_user_storage\n if self.file_format_id:\n if self.file_format_id not in dict(file_formats.choices):\n raise ValidationError('Invalid file_format')\n if set_by_file_on_disk and self.file_on_disk:\n if self.checksum is None or self.checksum == '':\n md5 = hashlib.md5()\n for chunk in self.file_on_disk.chunks():\n md5.update(chunk)\n self.checksum = md5.hexdigest()\n if not self.file_size:\n self.file_size = self.file_on_disk.size\n if not self.file_format_id:\n ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')\n if ext in list(dict(file_formats.choices).keys()):\n self.file_format_id = ext\n else:\n raise ValueError('Files of type `{}` are not supported.'\n .format(ext))\n super(File, self).save(*args, **kwargs)\n if self.uploaded_by_id:\n calculate_user_storage(self.uploaded_by_id)\n\n\n class Meta:\n indexes = [models.Index(fields=['checksum', 'file_size'], name=\n FILE_DISTINCT_INDEX_NAME), 
models.Index(fields=['-modified'],\n name=FILE_MODIFIED_DESC_INDEX_NAME)]\n constraints = [models.CheckConstraint(check=Q(preset__in=\n MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True), name\n =FILE_DURATION_CONSTRAINT)]\n\n\n<mask token>\n\n\nclass PrerequisiteContentRelationship(models.Model):\n \"\"\"\n Predefine the prerequisite relationship between two ContentNode objects.\n \"\"\"\n target_node = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)\n prerequisite = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['target_node', 'prerequisite']\n\n def clean(self, *args, **kwargs):\n if self.target_node == self.prerequisite:\n raise IntegrityError('Cannot self reference as prerequisite.')\n if PrerequisiteContentRelationship.objects.using(self._state.db\n ).filter(target_node=self.prerequisite, prerequisite=self.\n target_node):\n raise IntegrityError(\n 'Note: Prerequisite relationship is directional! 
%s and %s cannot be prerequisite of each other!'\n % (self.target_node, self.prerequisite))\n super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n super(PrerequisiteContentRelationship, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n\nclass RelatedContentRelationship(models.Model):\n \"\"\"\n Predefine the related relationship between two ContentNode objects.\n \"\"\"\n contentnode_1 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_1', on_delete=models.CASCADE)\n contentnode_2 = models.ForeignKey(ContentNode, related_name=\n '%(app_label)s_%(class)s_2', on_delete=models.CASCADE)\n\n\n class Meta:\n unique_together = ['contentnode_1', 'contentnode_2']\n\n def save(self, *args, **kwargs):\n if self.contentnode_1 == self.contentnode_2:\n raise IntegrityError('Cannot self reference as related.')\n if RelatedContentRelationship.objects.using(self._state.db).filter(\n contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1\n ):\n return\n super(RelatedContentRelationship, self).save(*args, **kwargs)\n\n\nclass Invitation(models.Model):\n \"\"\" Invitation to edit channel \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n accepted = models.BooleanField(default=False)\n declined = models.BooleanField(default=False)\n revoked = models.BooleanField(default=False)\n invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n SET_NULL, null=True, related_name='sent_to')\n share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)\n email = models.EmailField(max_length=100, null=True)\n sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\n 'sent_by', null=True, on_delete=models.CASCADE)\n channel = models.ForeignKey('Channel', null=True, related_name=\n 'pending_editors', on_delete=models.CASCADE)\n first_name = models.CharField(max_length=100, blank=True)\n last_name = 
models.CharField(max_length=100, blank=True, null=True)\n\n\n class Meta:\n verbose_name = 'Invitation'\n verbose_name_plural = 'Invitations'\n\n def accept(self):\n user = User.objects.filter(email__iexact=self.email).first()\n if self.channel:\n if self.share_mode == VIEW_ACCESS:\n self.channel.editors.remove(user)\n self.channel.viewers.add(user)\n else:\n self.channel.viewers.remove(user)\n self.channel.editors.add(user)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user)).distinct()\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n if user.is_admin:\n return queryset\n return queryset.filter(Q(email__iexact=user.email) | Q(sender=user) |\n Q(channel__editors=user) | Q(channel__viewers=user)).distinct()\n\n\nclass Change(models.Model):\n server_rev = models.BigAutoField(primary_key=True)\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,\n blank=True, on_delete=models.SET_NULL, related_name='changes_by_user')\n channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=\n models.CASCADE)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=\n True, on_delete=models.CASCADE, related_name='changes_about_user')\n client_rev = models.IntegerField(null=True, blank=True)\n session = models.ForeignKey(Session, null=True, blank=True, on_delete=\n models.SET_NULL)\n table = models.CharField(max_length=32)\n change_type = models.IntegerField()\n kwargs = JSONField(encoder=JSONEncoder)\n applied = models.BooleanField(default=False)\n errored = models.BooleanField(default=False)\n\n @classmethod\n def _create_from_change(cls, created_by_id=None, channel_id=None,\n user_id=None, session_key=None, applied=False, table=None, rev=None,\n **data):\n change_type = 
data.pop('type')\n if table is None or table not in ALL_TABLES:\n raise TypeError(\n 'table is a required argument for creating changes and must be a valid table name'\n )\n if change_type is None or change_type not in ALL_CHANGES:\n raise TypeError(\n 'change_type is a required argument for creating changes and must be a valid change type integer'\n )\n return cls(session_id=session_key, created_by_id=created_by_id,\n channel_id=channel_id, user_id=user_id, client_rev=rev, table=\n table, change_type=change_type, kwargs=data, applied=applied)\n\n @classmethod\n def create_changes(cls, changes, created_by_id=None, session_key=None,\n applied=False):\n change_models = []\n for change in changes:\n change_models.append(cls._create_from_change(created_by_id=\n created_by_id, session_key=session_key, applied=applied, **\n change))\n cls.objects.bulk_create(change_models)\n return change_models\n\n @classmethod\n def create_change(cls, change, created_by_id=None, session_key=None,\n applied=False):\n obj = cls._create_from_change(created_by_id=created_by_id,\n session_key=session_key, applied=applied, **change)\n obj.save()\n return obj\n\n @classmethod\n def serialize(cls, change):\n datum = get_attribute(change, ['kwargs']).copy()\n datum.update({'server_rev': get_attribute(change, ['server_rev']),\n 'table': get_attribute(change, ['table']), 'type':\n get_attribute(change, ['change_type']), 'channel_id':\n get_attribute(change, ['channel_id']), 'user_id': get_attribute\n (change, ['user_id']), 'created_by_id': get_attribute(change, [\n 'created_by_id'])})\n return datum\n\n def serialize_to_change_dict(self):\n return self.serialize(self)\n\n\nclass TaskResultCustom(object):\n \"\"\"\n Custom fields to add to django_celery_results's TaskResult model\n\n If adding fields to this class, run `makemigrations` then move the generated migration from the\n `django_celery_results` app to the `contentcuration` app and override the constructor to change\n the app_label. 
See `0141_add_task_signature` for an example\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='tasks',\n on_delete=models.CASCADE, null=True)\n channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)\n progress = models.IntegerField(null=True, blank=True, validators=[\n MinValueValidator(0), MaxValueValidator(100)])\n signature = models.CharField(null=True, blank=False, max_length=32)\n super_as_dict = TaskResult.as_dict\n\n def as_dict(self):\n \"\"\"\n :return: A dictionary representation\n \"\"\"\n super_dict = self.super_as_dict()\n super_dict.update(user_id=self.user_id, channel_id=self.channel_id,\n progress=self.progress)\n return super_dict\n\n @classmethod\n def contribute_to_class(cls, model_class=TaskResult):\n \"\"\"\n Adds fields to model, by default TaskResult\n :param model_class: TaskResult model\n \"\"\"\n for field in dir(cls):\n if not field.startswith('_') and field not in (\n 'contribute_to_class', 'Meta'):\n model_class.add_to_class(field, getattr(cls, field))\n setattr(model_class._meta, 'indexes', getattr(model_class._meta,\n 'indexes', []) + cls.Meta.indexes)\n\n\n class Meta:\n indexes = [models.Index(fields=['signature'], name=\n 'task_result_signature_idx', condition=Q(status__in=\n celery_states.UNREADY_STATES))]\n\n\n<mask token>\n",
"step-5": "import hashlib\nimport json\nimport logging\nimport os\nimport urllib.parse\nimport uuid\nfrom datetime import datetime\n\nimport pytz\nfrom celery import states as celery_states\nfrom django.conf import settings\nfrom django.contrib.auth.base_user import AbstractBaseUser\nfrom django.contrib.auth.base_user import BaseUserManager\nfrom django.contrib.auth.models import PermissionsMixin\nfrom django.contrib.sessions.models import Session\nfrom django.core.cache import cache\nfrom django.core.exceptions import MultipleObjectsReturned\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.exceptions import ValidationError\nfrom django.core.files.storage import default_storage\nfrom django.core.files.storage import FileSystemStorage\nfrom django.core.mail import send_mail\nfrom django.core.validators import MaxValueValidator\nfrom django.core.validators import MinValueValidator\nfrom django.db import IntegrityError\nfrom django.db import models\nfrom django.db.models import Count\nfrom django.db.models import Exists\nfrom django.db.models import F\nfrom django.db.models import Index\nfrom django.db.models import JSONField\nfrom django.db.models import Max\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import Sum\nfrom django.db.models import UUIDField as DjangoUUIDField\nfrom django.db.models import Value\nfrom django.db.models.expressions import ExpressionList\nfrom django.db.models.expressions import RawSQL\nfrom django.db.models.functions import Lower\nfrom django.db.models.indexes import IndexExpression\nfrom django.db.models.query_utils import DeferredAttribute\nfrom django.db.models.sql import Query\nfrom django.dispatch import receiver\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\nfrom django_celery_results.models import TaskResult\nfrom django_cte import 
With\nfrom le_utils import proquint\nfrom le_utils.constants import content_kinds\nfrom le_utils.constants import exercises\nfrom le_utils.constants import file_formats\nfrom le_utils.constants import format_presets\nfrom le_utils.constants import languages\nfrom le_utils.constants import roles\nfrom model_utils import FieldTracker\nfrom mptt.models import MPTTModel\nfrom mptt.models import raise_if_unsaved\nfrom mptt.models import TreeForeignKey\nfrom postmark.core import PMMailInactiveRecipientException\nfrom postmark.core import PMMailUnauthorizedException\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.fields import get_attribute\nfrom rest_framework.utils.encoders import JSONEncoder\n\nfrom contentcuration.constants import channel_history\nfrom contentcuration.constants import completion_criteria\nfrom contentcuration.constants import user_history\nfrom contentcuration.constants.contentnode import kind_activity_map\nfrom contentcuration.db.models.expressions import Array\nfrom contentcuration.db.models.functions import ArrayRemove\nfrom contentcuration.db.models.functions import Unnest\nfrom contentcuration.db.models.manager import CustomContentNodeTreeManager\nfrom contentcuration.db.models.manager import CustomManager\nfrom contentcuration.statistics import record_channel_stats\nfrom contentcuration.utils.cache import delete_public_channel_cache_keys\nfrom contentcuration.utils.parser import load_json_string\nfrom contentcuration.viewsets.sync.constants import ALL_CHANGES\nfrom contentcuration.viewsets.sync.constants import ALL_TABLES\n\n\nEDIT_ACCESS = \"edit\"\nVIEW_ACCESS = \"view\"\n\nDEFAULT_CONTENT_DEFAULTS = {\n 'license': None,\n 'language': None,\n 'author': None,\n 'aggregator': None,\n 'provider': None,\n 'copyright_holder': None,\n 'license_description': None,\n 'mastery_model': exercises.NUM_CORRECT_IN_A_ROW_5,\n 'm_value': 5,\n 'n_value': 5,\n 'auto_derive_video_thumbnail': True,\n 'auto_derive_audio_thumbnail': True,\n 
'auto_derive_document_thumbnail': True,\n 'auto_derive_html5_thumbnail': True,\n 'auto_derive_exercise_thumbnail': True,\n 'auto_randomize_questions': True,\n}\nDEFAULT_USER_PREFERENCES = json.dumps(DEFAULT_CONTENT_DEFAULTS, ensure_ascii=False)\n\n\ndef to_pk(model_or_pk):\n if isinstance(model_or_pk, models.Model):\n return model_or_pk.pk\n return model_or_pk\n\n\nclass UserManager(BaseUserManager):\n\n def create_user(self, email, first_name, last_name, password=None):\n if not email:\n raise ValueError('Email address not specified')\n\n new_user = self.model(\n email=self.normalize_email(email),\n )\n\n new_user.set_password(password)\n new_user.first_name = first_name\n new_user.last_name = last_name\n new_user.save(using=self._db)\n return new_user\n\n def create_superuser(self, email, first_name, last_name, password=None):\n new_user = self.create_user(email, first_name, last_name, password=password)\n new_user.is_admin = True\n new_user.save(using=self._db)\n return new_user\n\n\nclass UniqueActiveUserIndex(Index):\n def create_sql(self, model, schema_editor, using='', **kwargs):\n \"\"\"\n This is a vendored and modified version of the Django create_sql method\n We do this so that we can monkey patch in the unique index statement onto the schema_editor\n while we create the statement for this index, and then revert it to normal.\n\n We should remove this as soon as Django natively supports UniqueConstraints with Expressions.\n This should hopefully be the case in Django 3.3.\n \"\"\"\n include = [model._meta.get_field(field_name).column for field_name in self.include]\n condition = self._get_condition_sql(model, schema_editor)\n if self.expressions:\n index_expressions = []\n for expression in self.expressions:\n index_expression = IndexExpression(expression)\n index_expression.set_wrapper_classes(schema_editor.connection)\n index_expressions.append(index_expression)\n expressions = ExpressionList(*index_expressions).resolve_expression(\n Query(model, 
alias_cols=False),\n )\n fields = None\n col_suffixes = None\n else:\n fields = [\n model._meta.get_field(field_name)\n for field_name, _ in self.fields_orders\n ]\n col_suffixes = [order[1] for order in self.fields_orders]\n expressions = None\n sql = \"CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)%(include)s%(condition)s\"\n # Store the normal SQL statement for indexes\n old_create_index_sql = schema_editor.sql_create_index\n # Replace it with our own unique index so that this index actually adds a constraint\n schema_editor.sql_create_index = sql\n # Generate the SQL staetment that we want to return\n return_statement = schema_editor._create_index_sql(\n model, fields=fields, name=self.name, using=using,\n db_tablespace=self.db_tablespace, col_suffixes=col_suffixes,\n opclasses=self.opclasses, condition=condition, include=include,\n expressions=expressions, **kwargs,\n )\n # Reinstate the previous index SQL statement so that we have done no harm\n schema_editor.sql_create_index = old_create_index_sql\n # Return our SQL statement\n return return_statement\n\n\nclass User(AbstractBaseUser, PermissionsMixin):\n email = models.EmailField(max_length=100, unique=True)\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n is_admin = models.BooleanField(default=False)\n is_active = models.BooleanField('active', default=False,\n help_text='Designates whether this user should be treated as active.')\n is_staff = models.BooleanField('staff status', default=False,\n help_text='Designates whether the user can log into this admin site.')\n date_joined = models.DateTimeField('date joined', default=timezone.now)\n clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='user_clipboard', on_delete=models.SET_NULL)\n preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)\n disk_space = models.FloatField(default=524288000, help_text='How many bytes a user can upload')\n disk_space_used = 
models.FloatField(default=0, help_text='How many bytes a user has uploaded')\n\n information = JSONField(null=True)\n content_defaults = JSONField(default=dict)\n policies = JSONField(default=dict, null=True)\n feature_flags = JSONField(default=dict, null=True)\n\n deleted = models.BooleanField(default=False, db_index=True)\n\n _field_updates = FieldTracker(fields=[\n # Field to watch for changes\n \"disk_space\",\n ])\n\n objects = UserManager()\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['first_name', 'last_name']\n\n def __unicode__(self):\n return self.email\n\n def delete(self):\n \"\"\"\n Soft deletes the user account.\n \"\"\"\n self.deleted = True\n # Deactivate the user to disallow authentication and also\n # to let the user verify the email again after recovery.\n self.is_active = False\n self.save()\n self.history.create(user_id=self.pk, action=user_history.DELETION)\n\n def recover(self):\n \"\"\"\n Use this method when we want to recover a user.\n \"\"\"\n self.deleted = False\n self.save()\n self.history.create(user_id=self.pk, action=user_history.RECOVERY)\n\n def hard_delete_user_related_data(self):\n \"\"\"\n Hard delete all user related data. 
But keeps the user record itself intact.\n\n User related data that gets hard deleted are:\n - sole editor non-public channels.\n - sole editor non-public channelsets.\n - sole editor non-public channels' content nodes and its underlying files that are not\n used by any other channel.\n - all user invitations.\n \"\"\"\n from contentcuration.viewsets.common import SQCount\n\n # Hard delete invitations associated to this account.\n self.sent_to.all().delete()\n self.sent_by.all().delete()\n\n editable_channels_user_query = (\n User.objects.filter(editable_channels__id=OuterRef('id'))\n .values_list('id', flat=True)\n .distinct()\n )\n non_public_channels_sole_editor = self.editable_channels.annotate(num_editors=SQCount(\n editable_channels_user_query, field=\"id\")).filter(num_editors=1, public=False)\n\n # Point sole editor non-public channels' contentnodes to orphan tree to let\n # our garbage collection delete the nodes and underlying files.\n ContentNode._annotate_channel_id(ContentNode.objects).filter(channel_id__in=list(\n non_public_channels_sole_editor.values_list(\"id\", flat=True))).update(parent_id=settings.ORPHANAGE_ROOT_ID)\n\n # Hard delete non-public channels associated with this user (if user is the only editor).\n non_public_channels_sole_editor.delete()\n\n # Hard delete non-public channel collections associated with this user (if user is the only editor).\n user_query = (\n User.objects.filter(channel_sets__id=OuterRef('id'))\n .values_list('id', flat=True)\n .distinct()\n )\n self.channel_sets.annotate(num_editors=SQCount(user_query, field=\"id\")).filter(num_editors=1, public=False).delete()\n\n # Create history!\n self.history.create(user_id=self.pk, action=user_history.RELATED_DATA_HARD_DELETION)\n\n def can_edit(self, channel_id):\n return Channel.filter_edit_queryset(Channel.objects.all(), self).filter(pk=channel_id).exists()\n\n def check_space(self, size, checksum):\n if self.is_admin:\n return True\n\n active_files = 
self.get_user_active_files()\n if active_files.filter(checksum=checksum).exists():\n return True\n\n space = self.get_available_space(active_files=active_files)\n if space < size:\n raise PermissionDenied(_(\"Not enough space. Check your storage under Settings page.\"))\n\n def check_channel_space(self, channel):\n active_files = self.get_user_active_files()\n staging_tree_id = channel.staging_tree.tree_id\n channel_files = self.files\\\n .filter(contentnode__tree_id=staging_tree_id)\\\n .values('checksum')\\\n .distinct()\\\n .exclude(checksum__in=active_files.values_list('checksum', flat=True))\n staged_size = float(channel_files.aggregate(used=Sum('file_size'))['used'] or 0)\n\n if self.get_available_space(active_files=active_files) < (staged_size):\n raise PermissionDenied(_('Out of storage! Request more space under Settings > Storage.'))\n\n def check_staged_space(self, size, checksum):\n if self.staged_files.filter(checksum=checksum).exists():\n return True\n space = self.get_available_staged_space()\n if space < size:\n raise PermissionDenied(_('Out of storage! 
Request more space under Settings > Storage.'))\n\n def get_available_staged_space(self):\n space_used = self.staged_files.values('checksum').distinct().aggregate(size=Sum(\"file_size\"))['size'] or 0\n return float(max(self.disk_space - space_used, 0))\n\n def get_available_space(self, active_files=None):\n return float(max(self.disk_space - self.get_space_used(active_files=active_files), 0))\n\n def get_user_active_trees(self):\n return self.editable_channels.exclude(deleted=True)\\\n .values(tree_id=F(\"main_tree__tree_id\"))\n\n def get_user_active_files(self):\n cte = With(self.get_user_active_trees().distinct())\n\n return cte.join(self.files.get_queryset(), contentnode__tree_id=cte.col.tree_id)\\\n .with_cte(cte)\\\n .values('checksum')\\\n .distinct()\n\n def get_space_used(self, active_files=None):\n active_files = active_files or self.get_user_active_files()\n files = active_files.aggregate(total_used=Sum('file_size'))\n return float(files['total_used'] or 0)\n\n def set_space_used(self):\n self.disk_space_used = self.get_space_used()\n self.save()\n return self.disk_space_used\n\n def get_space_used_by_kind(self):\n active_files = self.get_user_active_files()\n files = active_files.values('preset__kind_id')\\\n .annotate(space=Sum('file_size'))\\\n .order_by()\n\n kind_dict = {}\n for item in files:\n kind_dict[item['preset__kind_id']] = item['space']\n return kind_dict\n\n def email_user(self, subject, message, from_email=None, **kwargs):\n try:\n # msg = EmailMultiAlternatives(subject, message, from_email, [self.email])\n # msg.attach_alternative(kwargs[\"html_message\"],\"text/html\")\n # msg.send()\n send_mail(subject, message, from_email, [self.email], **kwargs)\n except (PMMailInactiveRecipientException, PMMailUnauthorizedException) as e:\n logging.error(str(e))\n\n def clean(self):\n super(User, self).clean()\n self.email = self.__class__.objects.normalize_email(self.email)\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the 
last_name, with a space in between.\n \"\"\"\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_short_name(self):\n \"\"\"\n Returns the short name for the user.\n \"\"\"\n return self.first_name\n\n def get_token(self):\n token, _ = Token.objects.get_or_create(user=self)\n return token.key\n\n def save(self, *args, **kwargs):\n from contentcuration.utils.user import calculate_user_storage\n super(User, self).save(*args, **kwargs)\n\n if 'disk_space' in self._field_updates.changed():\n calculate_user_storage(self.pk)\n\n changed = False\n\n if not self.content_defaults:\n self.content_defaults = DEFAULT_CONTENT_DEFAULTS\n changed = True\n\n if not self.clipboard_tree:\n self.clipboard_tree = ContentNode.objects.create(title=self.email + \" clipboard\", kind_id=content_kinds.TOPIC)\n self.clipboard_tree.save()\n changed = True\n\n if changed:\n self.save()\n\n class Meta:\n verbose_name = \"User\"\n verbose_name_plural = \"Users\"\n indexes = [\n UniqueActiveUserIndex(Lower('email'), condition=Q(is_active=True), name=\"contentcura_email_d4d492_idx\")\n ]\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n\n if user.is_admin:\n return queryset\n\n # all shared editors\n all_editable = User.editable_channels.through.objects.all()\n editable = all_editable.filter(\n channel_id__in=all_editable.filter(user_id=user.pk).values_list(\"channel_id\", flat=True)\n )\n\n # all shared viewers\n all_view_only = User.view_only_channels.through.objects.all()\n view_only = all_view_only.filter(\n channel_id__in=all_view_only.filter(user_id=user.pk).values_list(\"channel_id\", flat=True)\n )\n\n return queryset.filter(\n Q(pk=user.pk)\n | Q(pk__in=editable.values_list(\"user_id\", flat=True))\n | Q(pk__in=view_only.values_list(\"user_id\", flat=True))\n )\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n\n 
if user.is_admin:
            return queryset

        return queryset.filter(pk=user.pk)

    @classmethod
    def get_for_email(cls, email, deleted=False, **filters):
        """
        Returns the appropriate User record given an email, ordered by:
        - those with is_active=True first, of which there should only ever be one
        - otherwise by ID DESC so the most recent inactive record should be returned

        Filters out deleted User records by default. To include both deleted and
        undeleted user records pass None to the deleted argument.

        :param email: A string of the user's email
        :param deleted: Value to filter the ``deleted`` flag on; pass None to skip that filter
        :param filters: Additional filters to filter the User queryset
        :return: User or None
        """
        # Case-insensitive match so differently-cased signups resolve to the same record.
        user_qs = User.objects.filter(email__iexact=email.strip())
        if deleted is not None:
            user_qs = user_qs.filter(deleted=deleted)
        return user_qs.filter(**filters).order_by("-is_active", "-id").first()


class UUIDField(models.CharField):
    """
    CharField that stores UUIDs as their 32-character hex representation.

    Accepts either ``uuid.UUID`` instances or plain strings and normalizes
    everything to the hex-string form.
    """

    def __init__(self, *args, **kwargs):
        # Force the column width to the length of a UUID hex string.
        kwargs['max_length'] = 32
        super(UUIDField, self).__init__(*args, **kwargs)

    def prepare_value(self, value):
        # Normalize UUID instances to hex for form display/comparison.
        if isinstance(value, uuid.UUID):
            return value.hex
        return value

    def get_default(self):
        # Defaults may be (callables returning) uuid.UUID, e.g. uuid.uuid4; store hex.
        result = super(UUIDField, self).get_default()
        if isinstance(result, uuid.UUID):
            result = result.hex
        return result

    def to_python(self, value):
        # Convert to the canonical hex-string form.
        if isinstance(value, uuid.UUID):
            return value.hex
        return value


class MPTTTreeIDManager(models.Model):
    """
    Because MPTT uses plain integers for tree IDs and does not use an auto-incrementing field for them,
    the same ID can sometimes be assigned to two trees if two channel create ops happen concurrently.

    As we are using this table only for the ID generation, it does not need any fields.

    We resolve this by creating a dummy table and using its ID as the tree index to take advantage of the db's
    concurrency-friendly way of generating sequential integer IDs. 
There is a custom migration that ensures\n that the number of records (and thus id) matches the max tree ID number when this table gets added.\n \"\"\"\n\n\ndef file_on_disk_name(instance, filename):\n \"\"\"\n Create a name spaced file path from the File obejct's checksum property.\n This path will be used to store the content copy\n :param instance: File (content File model)\n :param filename: str\n :return: str\n \"\"\"\n return generate_file_on_disk_name(instance.checksum, filename)\n\n\ndef generate_file_on_disk_name(checksum, filename):\n \"\"\" Separated from file_on_disk_name to allow for simple way to check if has already exists \"\"\"\n h = checksum\n basename, ext = os.path.splitext(filename)\n directory = os.path.join(settings.STORAGE_ROOT, h[0], h[1])\n if not os.path.exists(directory):\n os.makedirs(directory)\n return os.path.join(directory, h + ext.lower())\n\n\ndef object_storage_name(instance, filename):\n \"\"\"\n Create a name spaced file path from the File obejct's checksum property.\n This path will be used to store the content copy\n\n :param instance: File (content File model)\n :param filename: str\n :return: str\n \"\"\"\n\n default_ext = ''\n if instance.file_format_id:\n default_ext = '.{}'.format(instance.file_format_id)\n\n return generate_object_storage_name(instance.checksum, filename, default_ext)\n\n\ndef generate_object_storage_name(checksum, filename, default_ext=''):\n \"\"\" Separated from file_on_disk_name to allow for simple way to check if has already exists \"\"\"\n h = checksum\n basename, actual_ext = os.path.splitext(filename)\n ext = actual_ext if actual_ext else default_ext\n\n # Use / instead of os.path.join as Windows makes this \\\\\n directory = \"/\".join([settings.STORAGE_ROOT, h[0], h[1]])\n return os.path.join(directory, h + ext.lower())\n\n\ndef generate_storage_url(filename, request=None, *args):\n \"\"\"\n Generate a storage URL for the given content filename.\n \"\"\"\n\n path = 
generate_object_storage_name(os.path.splitext(filename)[0], filename)\n\n # There are three scenarios where Studio might be run as:\n #\n # 1. In normal kubernetes, nginx will proxy for us. We'll know we're in kubernetes when the\n # environment variable RUN_MODE=k8s\n #\n # 2. In Docker Compose and bare metal runserver, we'll be running in runserver, and minio\n # will be exposed in port 9000 in the host's localhost network.\n\n # Note (aron): returning the true storage URL (e.g. https://storage.googleapis.com/storage/a.mp4)\n # isn't too important, because we have CDN in front of our servers, so it should be cached.\n # But change the logic here in case there is a potential for bandwidth and latency improvement.\n\n # Detect our current state first\n run_mode = os.getenv(\"RUN_MODE\")\n\n # if we're running inside k8s, then just serve the normal /content/{storage,databases} URL,\n # and let nginx handle proper proxying.\n if run_mode == \"k8s\":\n url = \"/content/{path}\".format(\n path=path,\n )\n\n # if we're in docker-compose or in baremetal, just return the object storage URL as localhost:9000\n elif run_mode == \"docker-compose\" or run_mode is None:\n # generate the minio storage URL, so we can get the GET parameters that give everyone\n # access even if they don't need to log in\n params = urllib.parse.urlparse(default_storage.url(path)).query\n host = \"localhost\"\n port = 9000 # hardcoded to the default minio IP address\n url = \"http://{host}:{port}/{bucket}/{path}?{params}\".format(\n host=host,\n port=port,\n bucket=settings.AWS_S3_BUCKET_NAME,\n path=path,\n params=params,\n )\n\n return url\n\n\nclass FileOnDiskStorage(FileSystemStorage):\n \"\"\"\n Overrider FileSystemStorage's default save method to ignore duplicated file.\n \"\"\"\n\n def get_available_name(self, name):\n return name\n\n def _save(self, name, content):\n if self.exists(name):\n # if the file exists, do not call the superclasses _save method\n logging.warn('Content copy \"%s\" 
already exists!' % name)\n return name\n return super(FileOnDiskStorage, self)._save(name, content)\n\n\nclass SecretToken(models.Model):\n \"\"\"Tokens for channels\"\"\"\n token = models.CharField(max_length=100, unique=True)\n is_primary = models.BooleanField(default=False)\n\n @classmethod\n def exists(cls, token):\n \"\"\"\n Return true when the token string given by string already exists.\n Returns false otherwise.\n \"\"\"\n return cls.objects.filter(token=token).exists()\n\n @classmethod\n def generate_new_token(cls):\n \"\"\"\n Creates a primary secret token for the current channel using a proquint\n string. Creates a secondary token containing the channel id.\n\n These tokens can be used to refer to the channel to download its content\n database.\n \"\"\"\n token = proquint.generate()\n\n # Try 100 times to generate a unique token.\n TRIALS = 100\n for __ in range(TRIALS):\n token = proquint.generate()\n if SecretToken.exists(token):\n continue\n break\n # after TRIALS attempts and we didn't get a unique token,\n # just raise an error.\n # See https://stackoverflow.com/a/9980160 on what for-else loop does.\n else:\n raise ValueError(\"Cannot generate new token\")\n\n # We found a unique token! 
Save it\n return token\n\n def __str__(self):\n return \"{}-{}\".format(self.token[:5], self.token[5:])\n\n\ndef get_channel_thumbnail(channel):\n if not isinstance(channel, dict):\n channel = channel.__dict__\n if channel.get(\"thumbnail_encoding\"):\n thumbnail_data = channel.get(\"thumbnail_encoding\")\n if thumbnail_data.get(\"base64\"):\n return thumbnail_data[\"base64\"]\n\n if channel.get(\"thumbnail\") and 'static' not in channel.get(\"thumbnail\"):\n return generate_storage_url(channel.get(\"thumbnail\"))\n\n return '/static/img/kolibri_placeholder.png'\n\n\nCHANNEL_NAME_INDEX_NAME = \"channel_name_idx\"\n\n\n# A list of all the FKs from Channel object\n# to ContentNode trees\n# used for permissions filtering\nCHANNEL_TREES = (\n \"main_tree\",\n \"chef_tree\",\n \"trash_tree\",\n \"staging_tree\",\n \"previous_tree\",\n)\n\n\ndef boolean_val(val):\n return Value(val, output_field=models.BooleanField())\n\n\nclass PermissionCTE(With):\n tree_id_fields = [\n \"channel__{}__tree_id\".format(tree_name)\n for tree_name in CHANNEL_TREES\n ]\n\n def __init__(self, model, user_id, **kwargs):\n queryset = model.objects.filter(user_id=user_id)\\\n .annotate(\n tree_id=Unnest(ArrayRemove(Array(*self.tree_id_fields), None), output_field=models.IntegerField())\n )\n super(PermissionCTE, self).__init__(queryset=queryset.values(\"user_id\", \"channel_id\", \"tree_id\"), **kwargs)\n\n @classmethod\n def editable_channels(cls, user_id):\n return PermissionCTE(User.editable_channels.through, user_id, name=\"editable_channels_cte\")\n\n @classmethod\n def view_only_channels(cls, user_id):\n return PermissionCTE(User.view_only_channels.through, user_id, name=\"view_only_channels_cte\")\n\n def exists(self, *filters):\n return Exists(self.queryset().filter(*filters).values(\"user_id\"))\n\n\nclass Channel(models.Model):\n \"\"\" Permissions come from association with organizations \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n name = 
models.CharField(max_length=200, blank=True)\n description = models.CharField(max_length=400, blank=True)\n tagline = models.CharField(max_length=150, blank=True, null=True)\n version = models.IntegerField(default=0)\n thumbnail = models.TextField(blank=True, null=True)\n thumbnail_encoding = JSONField(default=dict)\n editors = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='editable_channels',\n verbose_name=\"editors\",\n help_text=\"Users with edit rights\",\n blank=True,\n )\n viewers = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='view_only_channels',\n verbose_name=\"viewers\",\n help_text=\"Users with view only rights\",\n blank=True,\n )\n language = models.ForeignKey('Language', null=True, blank=True, related_name='channel_language', on_delete=models.SET_NULL)\n trash_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_trash', on_delete=models.SET_NULL)\n clipboard_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_clipboard', on_delete=models.SET_NULL)\n main_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_main', on_delete=models.SET_NULL)\n staging_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_staging', on_delete=models.SET_NULL)\n chef_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_chef', on_delete=models.SET_NULL)\n previous_tree = models.ForeignKey('ContentNode', null=True, blank=True, related_name='channel_previous', on_delete=models.SET_NULL)\n bookmarked_by = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='bookmarked_channels',\n verbose_name=\"bookmarked by\",\n )\n deleted = models.BooleanField(default=False, db_index=True)\n public = models.BooleanField(default=False, db_index=True)\n preferences = models.TextField(default=DEFAULT_USER_PREFERENCES)\n content_defaults = JSONField(default=dict)\n priority = 
models.IntegerField(default=0, help_text=\"Order to display public channels\")\n last_published = models.DateTimeField(blank=True, null=True)\n secret_tokens = models.ManyToManyField(\n SecretToken,\n related_name='channels',\n verbose_name=\"secret tokens\",\n blank=True,\n )\n source_url = models.CharField(max_length=200, blank=True, null=True)\n demo_server_url = models.CharField(max_length=200, blank=True, null=True)\n\n # Fields specific to content generated by Ricecooker\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n ricecooker_version = models.CharField(max_length=100, blank=True, null=True)\n\n # Fields to calculate when channel is published\n published_data = JSONField(default=dict)\n icon_encoding = models.TextField(blank=True, null=True)\n total_resource_count = models.IntegerField(default=0)\n published_kind_count = models.TextField(blank=True, null=True)\n published_size = models.FloatField(default=0)\n included_languages = models.ManyToManyField(\n \"Language\",\n related_name='channels',\n verbose_name=\"languages\",\n blank=True,\n )\n\n _field_updates = FieldTracker(fields=[\n # Field to watch for changes\n \"description\",\n \"language_id\",\n \"thumbnail\",\n \"name\",\n \"thumbnail_encoding\",\n # watch these fields for changes\n # but exclude them from setting changed\n # on the main tree\n \"deleted\",\n \"public\",\n \"main_tree_id\",\n \"version\",\n ])\n\n @classmethod\n def get_editable(cls, user, channel_id):\n return cls.filter_edit_queryset(cls.objects.all(), user).get(id=channel_id)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n\n # it won't return anything\n if not user_id:\n return queryset.none()\n\n edit = Exists(User.editable_channels.through.objects.filter(user_id=user_id, channel_id=OuterRef(\"id\")))\n queryset = queryset.annotate(edit=edit)\n if user.is_admin:\n 
return queryset\n\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n user_email = not user.is_anonymous and user.email\n\n if user_id:\n filters = dict(user_id=user_id, channel_id=OuterRef(\"id\"))\n edit = Exists(User.editable_channels.through.objects.filter(**filters).values(\"user_id\"))\n view = Exists(User.view_only_channels.through.objects.filter(**filters).values(\"user_id\"))\n else:\n edit = boolean_val(False)\n view = boolean_val(False)\n\n queryset = queryset.annotate(\n edit=edit,\n view=view,\n )\n\n if user_id and user.is_admin:\n return queryset\n\n permission_filter = Q()\n if user_id:\n pending_channels = Invitation.objects.filter(email=user_email, revoked=False, declined=False, accepted=False).values_list(\n \"channel_id\", flat=True\n )\n permission_filter = (\n Q(view=True) | Q(edit=True) | Q(deleted=False, id__in=pending_channels)\n )\n\n return queryset.filter(permission_filter | Q(deleted=False, public=True))\n\n @classmethod\n def get_all_channels(cls):\n return cls.objects.select_related('main_tree').prefetch_related('editors', 'viewers').distinct()\n\n def resource_size_key(self):\n return \"{}_resource_size\".format(self.pk)\n\n # Might be good to display resource size, but need to improve query time first\n\n def get_resource_size(self):\n cached_data = cache.get(self.resource_size_key())\n if cached_data:\n return cached_data\n tree_id = self.main_tree.tree_id\n files = File.objects.select_related('contentnode', 'assessment_item')\\\n .filter(contentnode__tree_id=tree_id)\\\n .values('checksum', 'file_size')\\\n .distinct()\\\n .aggregate(resource_size=Sum('file_size'))\n cache.set(self.resource_size_key(), files['resource_size'] or 0, None)\n return files['resource_size'] or 0\n\n def on_create(self):\n record_channel_stats(self, None)\n if not self.content_defaults:\n self.content_defaults = DEFAULT_CONTENT_DEFAULTS\n\n if not 
self.main_tree:\n self.main_tree = ContentNode.objects.create(\n title=self.name,\n kind_id=content_kinds.TOPIC,\n content_id=self.id,\n node_id=self.id,\n original_channel_id=self.id,\n source_channel_id=self.id,\n changed=True,\n complete=True,\n )\n # Ensure that locust or unit tests raise if there are any concurrency issues with tree ids.\n if settings.DEBUG:\n if ContentNode.objects.filter(parent=None, tree_id=self.main_tree.tree_id).count() != 1:\n raise AssertionError\n\n if not self.trash_tree:\n self.trash_tree = ContentNode.objects.create(\n title=self.name,\n kind_id=content_kinds.TOPIC,\n content_id=self.id,\n node_id=self.id,\n )\n\n # if this change affects the published channel list, clear the channel cache\n if self.public and (self.main_tree and self.main_tree.published):\n delete_public_channel_cache_keys()\n\n def on_update(self):\n from contentcuration.utils.user import calculate_user_storage\n original_values = self._field_updates.changed()\n record_channel_stats(self, original_values)\n\n blacklist = set([\n \"public\",\n \"main_tree_id\",\n \"version\",\n ])\n\n if self.main_tree and original_values and any((True for field in original_values if field not in blacklist)):\n # Changing channel metadata should also mark main_tree as changed\n self.main_tree.changed = True\n\n # Check if original thumbnail is no longer referenced\n if \"thumbnail\" in original_values and original_values[\"thumbnail\"] and 'static' not in original_values[\"thumbnail\"]:\n filename, ext = os.path.splitext(original_values[\"thumbnail\"])\n delete_empty_file_reference(filename, ext[1:])\n\n # Refresh storage for all editors on the channel\n if \"deleted\" in original_values:\n for editor in self.editors.all():\n calculate_user_storage(editor.pk)\n\n # Delete db if channel has been deleted and mark as unpublished\n if \"deleted\" in original_values and not original_values[\"deleted\"]:\n self.pending_editors.all().delete()\n export_db_storage_path = 
os.path.join(settings.DB_ROOT, \"{channel_id}.sqlite3\".format(channel_id=self.id))\n if default_storage.exists(export_db_storage_path):\n default_storage.delete(export_db_storage_path)\n if self.main_tree:\n self.main_tree.published = False\n\n if self.main_tree and self.main_tree._field_updates.changed():\n self.main_tree.save()\n\n # if this change affects the published channel list, clear the channel cache\n if \"public\" in original_values and (self.main_tree and self.main_tree.published):\n delete_public_channel_cache_keys()\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n\n super(Channel, self).save(*args, **kwargs)\n\n def get_thumbnail(self):\n return get_channel_thumbnail(self)\n\n def has_changes(self):\n return self.main_tree.get_descendants(include_self=True).filter(changed=True).exists()\n\n def get_date_modified(self):\n return self.main_tree.get_descendants(include_self=True).aggregate(last_modified=Max('modified'))['last_modified']\n\n def get_resource_count(self):\n return self.main_tree.get_descendants().exclude(kind_id=content_kinds.TOPIC).order_by('content_id').distinct('content_id').count()\n\n def get_human_token(self):\n return self.secret_tokens.get(is_primary=True)\n\n def get_channel_id_token(self):\n return self.secret_tokens.get(token=self.id)\n\n def make_token(self):\n token = self.secret_tokens.create(token=SecretToken.generate_new_token(), is_primary=True)\n self.secret_tokens.get_or_create(token=self.id)\n return token\n\n def make_public(self, bypass_signals=False):\n \"\"\"\n Sets the current channel object to be public and viewable by anyone.\n\n If bypass_signals is True, update the model in such a way that we\n prevent any model signals from running due to the update.\n\n Returns the same channel object.\n \"\"\"\n if bypass_signals:\n self.public = True # set this attribute still, so the object will be updated\n 
Channel.objects.filter(id=self.id).update(public=True)\n # clear the channel cache\n delete_public_channel_cache_keys()\n else:\n self.public = True\n self.save()\n\n return self\n\n def mark_created(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.CREATION)\n\n def mark_publishing(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.PUBLICATION)\n self.main_tree.publishing = True\n self.main_tree.save()\n\n def mark_deleted(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.DELETION)\n self.deleted = True\n self.save()\n\n def mark_recovered(self, user):\n self.history.create(actor_id=to_pk(user), action=channel_history.RECOVERY)\n self.deleted = False\n self.save()\n\n @property\n def deletion_history(self):\n return self.history.filter(action=channel_history.DELETION)\n\n @property\n def publishing_history(self):\n return self.history.filter(action=channel_history.PUBLICATION)\n\n @classmethod\n def get_public_channels(cls, defer_nonmain_trees=False):\n \"\"\"\n Get all public channels.\n\n If defer_nonmain_trees is True, defer the loading of all\n trees except for the main_tree.\"\"\"\n if defer_nonmain_trees:\n c = (Channel.objects\n .filter(public=True)\n .exclude(deleted=True)\n .select_related('main_tree')\n .prefetch_related('editors')\n .defer('trash_tree', 'clipboard_tree', 'staging_tree', 'chef_tree', 'previous_tree', 'viewers'))\n else:\n c = Channel.objects.filter(public=True).exclude(deleted=True)\n\n return c\n\n class Meta:\n verbose_name = \"Channel\"\n verbose_name_plural = \"Channels\"\n\n indexes = [\n models.Index(fields=[\"name\"], name=CHANNEL_NAME_INDEX_NAME),\n ]\n index_together = [\n [\"deleted\", \"public\"]\n ]\n\n\nCHANNEL_HISTORY_CHANNEL_INDEX_NAME = \"idx_channel_history_channel_id\"\n\n\nclass ChannelHistory(models.Model):\n \"\"\"\n Model for tracking certain actions performed on a channel\n \"\"\"\n channel = models.ForeignKey('Channel', 
null=False, blank=False, related_name='history', on_delete=models.CASCADE)\n actor = models.ForeignKey('User', null=False, blank=False, related_name='channel_history', on_delete=models.CASCADE)\n performed = models.DateTimeField(default=timezone.now)\n action = models.CharField(max_length=50, choices=channel_history.choices)\n\n @classmethod\n def prune(cls):\n \"\"\"\n Prunes history records by keeping the most recent actions for each channel and type,\n and deleting all other older actions\n \"\"\"\n keep_ids = cls.objects.distinct(\"channel_id\", \"action\").order_by(\"channel_id\", \"action\", \"-performed\").values_list(\"id\", flat=True)\n cls.objects.exclude(id__in=keep_ids).delete()\n\n class Meta:\n verbose_name = \"Channel history\"\n verbose_name_plural = \"Channel histories\"\n\n indexes = [\n models.Index(fields=[\"channel_id\"], name=CHANNEL_HISTORY_CHANNEL_INDEX_NAME),\n ]\n\n\nclass UserHistory(models.Model):\n \"\"\"\n Model that stores the user's action history.\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=False, blank=False, related_name=\"history\", on_delete=models.CASCADE)\n action = models.CharField(max_length=32, choices=user_history.choices)\n\n performed_at = models.DateTimeField(default=timezone.now)\n\n\nclass ChannelSet(models.Model):\n # NOTE: this is referred to as \"channel collections\" on the front-end, but we need to call it\n # something else as there is already a ChannelCollection model on the front-end\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n name = models.CharField(max_length=200, blank=True)\n description = models.CharField(max_length=400, blank=True)\n public = models.BooleanField(default=False, db_index=True)\n editors = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='channel_sets',\n verbose_name=\"editors\",\n help_text=\"Users with edit rights\",\n blank=True,\n )\n secret_token = models.ForeignKey('SecretToken', null=True, blank=True, related_name='channel_sets', 
on_delete=models.SET_NULL)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n user_id = not user.is_anonymous and user.id\n edit = Exists(User.channel_sets.through.objects.filter(user_id=user_id, channelset_id=OuterRef(\"id\")))\n queryset = queryset.annotate(edit=edit)\n if user.is_admin:\n return queryset\n\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n return cls.filter_edit_queryset(queryset, user)\n\n def get_channels(self):\n if self.secret_token:\n return self.secret_token.channels.filter(deleted=False)\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n\n super(ChannelSet, self).save()\n\n def on_create(self):\n if not self.secret_token:\n self.secret_token = SecretToken.objects.create(token=SecretToken.generate_new_token())\n\n def delete(self, *args, **kwargs):\n super(ChannelSet, self).delete(*args, **kwargs)\n\n if self.secret_token:\n self.secret_token.delete()\n\n\nclass ContentTag(models.Model):\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n tag_name = models.CharField(max_length=50)\n channel = models.ForeignKey('Channel', related_name='tags', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)\n objects = CustomManager()\n\n def __str__(self):\n return self.tag_name\n\n class Meta:\n unique_together = ['tag_name', 'channel']\n\n\nclass License(models.Model):\n \"\"\"\n Normalize the license of ContentNode model\n \"\"\"\n license_name = models.CharField(max_length=50)\n license_url = models.URLField(blank=True)\n license_description = models.TextField(blank=True)\n copyright_holder_required = models.BooleanField(default=True)\n is_custom = models.BooleanField(default=False)\n exists = models.BooleanField(\n default=False,\n verbose_name=\"license exists\",\n help_text=\"Tells whether or not a content item is licensed to share\",\n )\n\n @classmethod\n def validate_name(cls, 
name):\n if cls.objects.filter(license_name=name).count() == 0:\n raise ValidationError('License `{}` does not exist'.format(name))\n\n def __str__(self):\n return self.license_name\n\n\nNODE_ID_INDEX_NAME = \"node_id_idx\"\nNODE_MODIFIED_INDEX_NAME = \"node_modified_idx\"\nNODE_MODIFIED_DESC_INDEX_NAME = \"node_modified_desc_idx\"\nCONTENTNODE_TREE_ID_CACHE_KEY = \"contentnode_{pk}__tree_id\"\n\n\nclass ContentNode(MPTTModel, models.Model):\n \"\"\"\n By default, all nodes have a title and can be used as a topic.\n \"\"\"\n # Random id used internally on Studio (See `node_id` for id used in Kolibri)\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n\n # the content_id is used for tracking a user's interaction with a piece of\n # content, in the face of possibly many copies of that content. When a user\n # interacts with a piece of content, all substantially similar pieces of\n # content should be marked as such as well. We track these \"substantially\n # similar\" types of content by having them have the same content_id.\n content_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False, db_index=True)\n # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta\n node_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)\n\n # TODO: disallow nulls once existing models have been set\n original_channel_id = UUIDField(primary_key=False, editable=False, null=True,\n db_index=True) # Original channel copied from\n source_channel_id = UUIDField(primary_key=False, editable=False, null=True) # Immediate channel copied from\n # Original node_id of node copied from (TODO: original_node_id clashes with original_node field - temporary)\n original_source_node_id = UUIDField(primary_key=False, editable=False, null=True,\n db_index=True)\n source_node_id = UUIDField(primary_key=False, editable=False, null=True) # Immediate node_id of node copied from\n\n # Fields specific to content generated by 
Ricecooker\n source_id = models.CharField(max_length=200, blank=True, null=True)\n source_domain = models.CharField(max_length=300, blank=True, null=True)\n\n title = models.CharField(max_length=200, blank=True)\n description = models.TextField(blank=True)\n kind = models.ForeignKey('ContentKind', related_name='contentnodes', db_index=True, null=True, blank=True, on_delete=models.SET_NULL)\n license = models.ForeignKey('License', null=True, blank=True, on_delete=models.SET_NULL)\n license_description = models.CharField(max_length=400, null=True, blank=True)\n prerequisite = models.ManyToManyField('self', related_name='is_prerequisite_of',\n through='PrerequisiteContentRelationship', symmetrical=False, blank=True)\n is_related = models.ManyToManyField('self', related_name='relate_to', through='RelatedContentRelationship',\n symmetrical=False, blank=True)\n language = models.ForeignKey('Language', null=True, blank=True, related_name='content_language', on_delete=models.SET_NULL)\n parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True, on_delete=models.CASCADE)\n tags = models.ManyToManyField(ContentTag, symmetrical=False, related_name='tagged_content', blank=True)\n # No longer used\n sort_order = models.FloatField(max_length=50, default=1, verbose_name=\"sort order\",\n help_text=\"Ascending, lowest number shown first\")\n copyright_holder = models.CharField(max_length=200, null=True, blank=True, default=\"\",\n help_text=\"Organization of person who holds the essential rights\")\n # legacy field...\n original_node = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='duplicates')\n cloned_source = TreeForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='clones')\n\n thumbnail_encoding = models.TextField(blank=True, null=True)\n\n created = models.DateTimeField(default=timezone.now, verbose_name=\"created\")\n modified = models.DateTimeField(auto_now=True, 
verbose_name=\"modified\")\n published = models.BooleanField(default=False)\n publishing = models.BooleanField(default=False)\n complete = models.BooleanField(null=True)\n\n changed = models.BooleanField(default=True)\n \"\"\"\n Extra fields for exercises:\n - type: mastery model to use to determine completion\n - m: m value for M out of N mastery criteria\n - n: n value for M out of N mastery criteria\n \"\"\"\n extra_fields = JSONField(default=dict, blank=True, null=True)\n author = models.CharField(max_length=200, blank=True, default=\"\", help_text=\"Who created this content?\",\n null=True)\n aggregator = models.CharField(max_length=200, blank=True, default=\"\", help_text=\"Who gathered this content together?\",\n null=True)\n provider = models.CharField(max_length=200, blank=True, default=\"\", help_text=\"Who distributed this content?\",\n null=True)\n\n role_visibility = models.CharField(max_length=50, choices=roles.choices, default=roles.LEARNER)\n freeze_authoring_data = models.BooleanField(default=False)\n\n # Fields for metadata labels\n # These fields use a map to store applied labels\n # {\n # \"<label_id1>\": true,\n # \"<label_id2>\": true,\n # }\n grade_levels = models.JSONField(blank=True, null=True)\n resource_types = models.JSONField(blank=True, null=True)\n learning_activities = models.JSONField(blank=True, null=True)\n accessibility_labels = models.JSONField(blank=True, null=True)\n categories = models.JSONField(blank=True, null=True)\n learner_needs = models.JSONField(blank=True, null=True)\n\n # A field for storing a suggested duration for the content node\n # this duration should be in seconds.\n suggested_duration = models.IntegerField(blank=True, null=True, help_text=\"Suggested duration for the content node (in seconds)\")\n\n objects = CustomContentNodeTreeManager()\n\n # Track all updates and ignore a blacklist of attributes\n # when we check for changes\n _field_updates = FieldTracker()\n\n _permission_filter = 
Q(tree_id=OuterRef(\"tree_id\"))\n\n @classmethod\n def _annotate_channel_id(cls, queryset):\n # Annotate channel id\n return queryset.annotate(\n channel_id=Subquery(\n Channel.objects.filter(\n main_tree__tree_id=OuterRef(\"tree_id\")\n ).values_list(\"id\", flat=True)[:1]\n )\n )\n\n @classmethod\n def filter_by_pk(cls, pk):\n \"\"\"\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `False`, this always\n returns a queryset filtered by pk.\n\n When `settings.IS_CONTENTNODE_TABLE_PARTITIONED` is `True` and a ContentNode\n for `pk` exists, this returns a queryset filtered by `pk` AND `tree_id`. If\n a ContentNode does not exist for `pk` then an empty queryset is returned.\n \"\"\"\n query = ContentNode.objects.filter(pk=pk)\n\n if settings.IS_CONTENTNODE_TABLE_PARTITIONED is True:\n tree_id = cache.get(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk))\n\n if tree_id:\n query = query.filter(tree_id=tree_id)\n else:\n tree_id = ContentNode.objects.filter(pk=pk).values_list(\"tree_id\", flat=True).first()\n if tree_id:\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=pk), tree_id, None)\n query = query.filter(tree_id=tree_id)\n else:\n query = query.none()\n\n return query\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n\n if not user_id:\n return queryset.none()\n\n edit_cte = PermissionCTE.editable_channels(user_id)\n\n queryset = queryset.with_cte(edit_cte).annotate(\n edit=edit_cte.exists(cls._permission_filter),\n )\n\n if user.is_admin:\n return queryset\n\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n\n queryset = queryset.annotate(\n public=Exists(\n Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef(\"tree_id\")\n ).values(\"pk\")\n ),\n )\n\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)\n\n edit_cte = 
PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(\n edit=edit_cte.exists(cls._permission_filter),\n view=view_cte.exists(cls._permission_filter),\n )\n\n if user.is_admin:\n return queryset\n\n return queryset.filter(\n Q(view=True)\n | Q(edit=True)\n | Q(public=True)\n )\n\n @raise_if_unsaved\n def get_root(self):\n # Only topics can be root nodes\n if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n return self\n return super(ContentNode, self).get_root()\n\n @raise_if_unsaved\n def get_root_id(self):\n # Only topics can be root nodes\n if self.is_root_node() and self.kind_id != content_kinds.TOPIC:\n return self\n\n return ContentNode.objects.values_list('pk', flat=True).get(\n tree_id=self._mpttfield('tree_id'),\n parent=None,\n )\n\n def get_tree_data(self, levels=float('inf')):\n \"\"\"\n Returns `levels`-deep tree information starting at current node.\n Args:\n levels (int): depth of tree hierarchy to return\n Returns:\n tree (dict): starting with self, with children list containing either\n the just the children's `node_id`s or full recusive tree.\n \"\"\"\n if self.kind_id == content_kinds.TOPIC:\n node_data = {\n \"title\": self.title,\n \"kind\": self.kind_id,\n \"node_id\": self.node_id,\n \"studio_id\": self.id,\n }\n children = self.children.all()\n if levels > 0:\n node_data[\"children\"] = [c.get_tree_data(levels=levels - 1) for c in children]\n return node_data\n if self.kind_id == content_kinds.EXERCISE:\n return {\n \"title\": self.title,\n \"kind\": self.kind_id,\n \"count\": self.assessment_items.count(),\n \"node_id\": self.node_id,\n \"studio_id\": self.id,\n }\n return {\n \"title\": self.title,\n \"kind\": self.kind_id,\n \"file_size\": self.files.values('file_size').aggregate(size=Sum('file_size'))['size'],\n \"node_id\": self.node_id,\n \"studio_id\": self.id,\n }\n\n def get_original_node(self):\n 
original_node = self.original_node or self\n if self.original_channel_id and self.original_source_node_id:\n original_tree_id = Channel.objects.select_related(\"main_tree\").get(pk=self.original_channel_id).main_tree.tree_id\n original_node = ContentNode.objects.filter(tree_id=original_tree_id, node_id=self.original_source_node_id).first() or \\\n ContentNode.objects.filter(tree_id=original_tree_id, content_id=self.content_id).first() or self\n return original_node\n\n def get_associated_presets(self):\n key = \"associated_presets_{}\".format(self.kind_id)\n cached_data = cache.get(key)\n if cached_data:\n return cached_data\n presets = list(FormatPreset.objects.filter(kind=self.kind).values())\n cache.set(key, presets, None)\n return presets\n\n def get_prerequisites(self):\n prerequisite_mapping = {}\n prerequisites = self.prerequisite.all()\n prereqlist = list(prerequisites)\n for prereq in prerequisites:\n prlist, prereqmapping = prereq.get_prerequisites()\n prerequisite_mapping.update({prereq.pk: prereqmapping})\n prereqlist.extend(prlist)\n return prereqlist, prerequisite_mapping\n\n def get_postrequisites(self):\n postrequisite_mapping = {}\n postrequisites = self.is_prerequisite_of.all()\n postreqlist = list(postrequisites)\n for postreq in postrequisites:\n prlist, postreqmapping = postreq.get_postrequisites()\n postrequisite_mapping.update({postreq.pk: postreqmapping})\n postreqlist.extend(prlist)\n return postreqlist, postrequisite_mapping\n\n def get_channel_id(self):\n if hasattr(self, \"channel_id\"):\n return self.channel_id\n channel = self.get_channel()\n if channel:\n return channel.id\n return None\n\n def get_channel(self):\n try:\n root = self.get_root()\n if not root:\n return None\n return Channel.objects.filter(Q(main_tree=root) | Q(chef_tree=root) | Q(trash_tree=root) | Q(staging_tree=root) | Q(previous_tree=root)).first()\n except (ObjectDoesNotExist, MultipleObjectsReturned, AttributeError):\n return None\n\n def get_thumbnail(self):\n # 
Problems with json.loads, so use ast.literal_eval to get dict\n if self.thumbnail_encoding:\n thumbnail_data = load_json_string(self.thumbnail_encoding)\n if type(thumbnail_data) is dict and thumbnail_data.get(\"base64\"):\n return thumbnail_data[\"base64\"]\n\n thumbnail = self.files.filter(preset__thumbnail=True).first()\n if thumbnail:\n return generate_storage_url(str(thumbnail))\n\n return \"\"\n\n @classmethod\n def get_nodes_with_title(cls, title, limit_to_children_of=None):\n \"\"\"\n Returns all ContentNodes with a given title. If limit_to_children_of\n is passed in with an id, only look at all the children of the node with that id.\n \"\"\"\n if limit_to_children_of:\n root = cls.objects.get(id=limit_to_children_of)\n return root.get_descendants().filter(title=title)\n return cls.objects.filter(title=title)\n\n def get_details(self, channel_id=None):\n \"\"\"\n Returns information about the node and its children, including total size, languages, files, etc.\n\n :return: A dictionary with detailed statistics and information about the node.\n \"\"\"\n from contentcuration.viewsets.common import SQArrayAgg\n from contentcuration.viewsets.common import SQCount\n from contentcuration.viewsets.common import SQRelatedArrayAgg\n from contentcuration.viewsets.common import SQSum\n from contentcuration.viewsets.common import SQJSONBKeyArrayAgg\n\n node = ContentNode.objects.filter(pk=self.id, tree_id=self.tree_id).order_by()\n\n descendants = (\n self.get_descendants()\n .values(\"id\")\n )\n\n if channel_id:\n channel = Channel.objects.filter(id=channel_id)[0]\n else:\n channel = self.get_channel()\n\n if not descendants.exists():\n data = {\n \"last_update\": pytz.utc.localize(datetime.now()).strftime(\n settings.DATE_TIME_FORMAT\n ),\n \"created\": self.created.strftime(settings.DATE_TIME_FORMAT),\n \"resource_count\": 0,\n \"resource_size\": 0,\n \"includes\": {\"coach_content\": 0, \"exercises\": 0},\n \"kind_count\": [],\n \"languages\": [],\n 
\"accessible_languages\": [],\n \"licenses\": [],\n \"tags\": [],\n \"copyright_holders\": [],\n \"authors\": [],\n \"aggregators\": [],\n \"providers\": [],\n \"sample_pathway\": [],\n \"original_channels\": [],\n \"sample_nodes\": [],\n \"levels\": [],\n \"categories\": [],\n }\n\n # Set cache with latest data\n cache.set(\"details_{}\".format(self.node_id), json.dumps(data), None)\n return data\n\n # Get resources\n resources = descendants.exclude(kind=content_kinds.TOPIC).order_by()\n nodes = With(\n File.objects.filter(contentnode_id__in=Subquery(resources.values(\"id\")))\n .values(\"checksum\", \"file_size\")\n .order_by(),\n name=\"nodes\",\n )\n file_query = (\n nodes.queryset().with_cte(nodes).values(\"checksum\", \"file_size\").distinct()\n )\n l_nodes = With(\n File.objects.filter(contentnode_id__in=Subquery(resources.values(\"id\")))\n .values(\"language_id\", \"preset_id\")\n .order_by(),\n name=\"l_nodes\",\n )\n accessible_languages_query = (\n l_nodes.queryset()\n .filter(preset_id=format_presets.VIDEO_SUBTITLE)\n .with_cte(l_nodes)\n .values(\"language__native_name\")\n .distinct()\n )\n\n tags_query = str(\n ContentTag.objects.filter(\n tagged_content__pk__in=descendants.values_list(\"pk\", flat=True)\n )\n .values(\"tag_name\")\n .annotate(count=Count(\"tag_name\"))\n .query\n ).replace(\"topic\", \"'topic'\")\n kind_count_query = str(\n resources.values(\"kind_id\").annotate(count=Count(\"kind_id\")).query\n ).replace(\"topic\", \"'topic'\")\n\n node = node.annotate(\n resource_count=SQCount(resources, field=\"id\"),\n resource_size=SQSum(file_query, field=\"file_size\"),\n copyright_holders=SQArrayAgg(\n resources.distinct(\"copyright_holder\").order_by(\"copyright_holder\"),\n field=\"copyright_holder\",\n ),\n authors=SQArrayAgg(\n resources.distinct(\"author\").order_by(\"author\"), field=\"author\"\n ),\n aggregators=SQArrayAgg(\n resources.distinct(\"aggregator\").order_by(\"aggregator\"),\n field=\"aggregator\",\n ),\n 
providers=SQArrayAgg(\n resources.distinct(\"provider\").order_by(\"provider\"), field=\"provider\"\n ),\n languages=SQRelatedArrayAgg(\n descendants.exclude(language=None)\n .distinct(\"language__native_name\")\n .order_by(),\n field=\"language__native_name\",\n fieldname=\"native_name\",\n ),\n accessible_languages=SQRelatedArrayAgg(\n accessible_languages_query,\n field=\"language__native_name\",\n fieldname=\"native_name\",\n ),\n licenses=SQRelatedArrayAgg(\n resources.exclude(license=None)\n .distinct(\"license__license_name\")\n .order_by(\"license__license_name\"),\n field=\"license__license_name\",\n fieldname=\"license_name\",\n ),\n kind_count=RawSQL(\n \"SELECT json_agg(row_to_json (x)) FROM ({}) as x\".format(\n kind_count_query\n ),\n (),\n ),\n tags_list=RawSQL(\n \"SELECT json_agg(row_to_json (x)) FROM ({}) as x\".format(tags_query), ()\n ),\n coach_content=SQCount(\n resources.filter(role_visibility=roles.COACH), field=\"id\"\n ),\n exercises=SQCount(\n resources.filter(kind_id=content_kinds.EXERCISE), field=\"id\"\n ),\n levels=SQJSONBKeyArrayAgg(\n descendants.exclude(grade_levels__isnull=True),\n field=\"grade_levels\",\n ),\n all_categories=SQJSONBKeyArrayAgg(\n descendants.exclude(categories__isnull=True),\n field=\"categories\",\n ),\n )\n\n # Get sample pathway by getting longest path\n # Using resources.aggregate adds a lot of time, use values that have already been fetched\n max_level = max(\n resources.values_list(\"level\", flat=True).order_by().distinct() or [0]\n )\n m_nodes = With(\n resources.values(\"id\", \"level\", \"tree_id\", \"lft\").order_by(),\n name=\"m_nodes\",\n )\n deepest_node_record = (\n m_nodes.queryset()\n .with_cte(m_nodes)\n .filter(level=max_level)\n .values(\"id\")\n .order_by(\"tree_id\", \"lft\")\n .first()\n )\n if deepest_node_record:\n deepest_node = ContentNode.objects.get(pk=deepest_node_record[\"id\"])\n pathway = (\n list(\n deepest_node.get_ancestors()\n .order_by()\n .exclude(parent=None)\n 
.values(\"title\", \"node_id\", \"kind_id\")\n .order_by()\n )\n if deepest_node_record\n else []\n )\n sample_nodes = (\n [\n {\n \"node_id\": n.node_id,\n \"title\": n.title,\n \"description\": n.description,\n \"thumbnail\": n.get_thumbnail(),\n \"kind\": n.kind_id,\n }\n for n in deepest_node.get_siblings(include_self=True)[0:4]\n ]\n if deepest_node_record\n else []\n )\n\n # Get list of channels nodes were originally imported from (omitting the current channel)\n channel_id = channel and channel.id\n originals = (\n resources.values(\"original_channel_id\")\n .annotate(count=Count(\"original_channel_id\"))\n .order_by(\"original_channel_id\")\n )\n originals = {c[\"original_channel_id\"]: c[\"count\"] for c in originals}\n original_channels = (\n Channel.objects.exclude(pk=channel_id)\n .filter(pk__in=originals.keys(), deleted=False)\n .order_by()\n )\n original_channels = [\n {\n \"id\": c.id,\n \"name\": \"{}{}\".format(\n c.name, _(\" (Original)\") if channel_id == c.id else \"\"\n ),\n \"thumbnail\": c.get_thumbnail(),\n \"count\": originals[c.id],\n }\n for c in original_channels\n ]\n\n node = (\n node.order_by()\n .values(\n \"id\",\n \"resource_count\",\n \"resource_size\",\n \"copyright_holders\",\n \"authors\",\n \"aggregators\",\n \"providers\",\n \"languages\",\n \"accessible_languages\",\n \"coach_content\",\n \"licenses\",\n \"tags_list\",\n \"kind_count\",\n \"exercises\",\n \"levels\",\n \"all_categories\",\n )\n .first()\n )\n for_educators = {\n \"coach_content\": node[\"coach_content\"],\n \"exercises\": node[\"exercises\"],\n }\n # Serialize data\n data = {\n \"last_update\": pytz.utc.localize(datetime.now()).strftime(\n settings.DATE_TIME_FORMAT\n ),\n \"created\": self.created.strftime(settings.DATE_TIME_FORMAT),\n \"resource_count\": node.get(\"resource_count\", 0),\n \"resource_size\": node.get(\"resource_size\", 0),\n \"includes\": for_educators,\n \"kind_count\": node.get(\"kind_count\") or [],\n \"languages\": 
node.get(\"languages\") or [],\n \"accessible_languages\": node.get(\"accessible_languages\") or [],\n \"licenses\": node.get(\"licenses\") or [],\n \"tags\": node.get(\"tags_list\") or [],\n \"original_channels\": original_channels,\n \"sample_pathway\": pathway,\n \"sample_nodes\": sample_nodes,\n # source model fields for the below default to an empty string, but can also be null\n \"authors\": list(filter(bool, node[\"authors\"])),\n \"aggregators\": list(filter(bool, node[\"aggregators\"])),\n \"providers\": list(filter(bool, node[\"providers\"])),\n \"copyright_holders\": list(filter(bool, node[\"copyright_holders\"])),\n \"levels\": node.get(\"levels\") or [],\n \"categories\": node.get(\"all_categories\") or [],\n }\n\n # Set cache with latest data\n cache.set(\"details_{}\".format(self.node_id), json.dumps(data), None)\n return data\n\n def has_changes(self):\n mptt_opts = self._mptt_meta\n # Ignore fields that are used for dirty tracking, and also mptt fields, as changes to these are tracked in mptt manager methods.\n blacklist = set([\n 'changed',\n 'modified',\n 'publishing',\n mptt_opts.tree_id_attr,\n mptt_opts.left_attr,\n mptt_opts.right_attr,\n mptt_opts.level_attr,\n ])\n original_values = self._field_updates.changed()\n return any((True for field in original_values if field not in blacklist))\n\n def recalculate_editors_storage(self):\n from contentcuration.utils.user import calculate_user_storage\n for editor in self.files.values_list('uploaded_by_id', flat=True).distinct():\n calculate_user_storage(editor)\n\n def mark_complete(self): # noqa C901\n errors = []\n # Is complete if title is falsy but only if not a root node.\n if not (bool(self.title) or self.parent_id is None):\n errors.append(\"Empty title\")\n if self.kind_id != content_kinds.TOPIC:\n if not self.license:\n errors.append(\"Missing license\")\n if self.license and self.license.is_custom and not self.license_description:\n errors.append(\"Missing license description for custom 
license\")\n if self.license and self.license.copyright_holder_required and not self.copyright_holder:\n errors.append(\"Missing required copyright holder\")\n if self.kind_id != content_kinds.EXERCISE and not self.files.filter(preset__supplementary=False).exists():\n errors.append(\"Missing default file\")\n if self.kind_id == content_kinds.EXERCISE:\n # Check to see if the exercise has at least one assessment item that has:\n if not self.assessment_items.filter(\n # Item with non-blank raw data\n ~Q(raw_data=\"\") | (\n # A non-blank question\n ~Q(question='')\n # Non-blank answers\n & ~Q(answers='[]')\n # With either an input question or one answer marked as correct\n & (Q(type=exercises.INPUT_QUESTION) | Q(answers__iregex=r'\"correct\":\\s*true'))\n )\n ).exists():\n errors.append(\"No questions with question text and complete answers\")\n # Check that it has a mastery model set\n # Either check for the previous location for the mastery model, or rely on our completion criteria validation\n # that if it has been set, then it has been set correctly.\n criterion = self.extra_fields.get(\"options\", {}).get(\"completion_criteria\")\n if not (self.extra_fields.get(\"mastery_model\") or criterion):\n errors.append(\"Missing mastery criterion\")\n if criterion:\n try:\n completion_criteria.validate(criterion, kind=content_kinds.EXERCISE)\n except completion_criteria.ValidationError:\n errors.append(\"Mastery criterion is defined but is invalid\")\n self.complete = not errors\n return errors\n\n def make_content_id_unique(self):\n \"\"\"\n If self is NOT an original contentnode (in other words, a copied contentnode)\n and a contentnode with same content_id exists then we update self's content_id.\n \"\"\"\n is_node_original = self.original_source_node_id is None or self.original_source_node_id == self.node_id\n node_same_content_id = ContentNode.objects.exclude(pk=self.pk).filter(content_id=self.content_id)\n if (not is_node_original) and 
node_same_content_id.exists():\n ContentNode.objects.filter(pk=self.pk).update(content_id=uuid.uuid4().hex)\n\n def on_create(self):\n self.changed = True\n self.recalculate_editors_storage()\n self.set_default_learning_activity()\n\n def on_update(self):\n self.changed = self.changed or self.has_changes()\n\n def move_to(self, target, *args, **kwargs):\n parent_was_trashtree = self.parent.channel_trash.exists()\n super(ContentNode, self).move_to(target, *args, **kwargs)\n self.save()\n\n # Update tree_id cache when node is moved to another tree\n cache.set(CONTENTNODE_TREE_ID_CACHE_KEY.format(pk=self.id), self.tree_id, None)\n\n # Recalculate storage if node was moved to or from the trash tree\n if target.channel_trash.exists() or parent_was_trashtree:\n self.recalculate_editors_storage()\n\n def set_default_learning_activity(self):\n if self.learning_activities is None:\n if self.kind in kind_activity_map:\n self.learning_activities = {\n kind_activity_map[self.kind]: True\n }\n\n def save(self, skip_lock=False, *args, **kwargs):\n if self._state.adding:\n self.on_create()\n else:\n self.on_update()\n\n # Logic borrowed from mptt - do a simple check to see if we have changed\n # the parent of the node. 
We use the mptt specific cached fields here\n # because these get updated by the mptt move methods, and so will be up to\n # date, meaning we can avoid locking the DB twice when the fields have already\n # been updated in the database.\n\n # If most moves are being done independently of just changing the parent\n # and then calling a save, locking within the save method itself should rarely\n # be triggered - meaning updates to contentnode metadata should only rarely\n # trigger a write lock on mptt fields.\n\n old_parent_id = self._field_updates.changed().get(\"parent_id\")\n if self._state.adding and (self.parent_id or self.parent):\n same_order = False\n elif old_parent_id is DeferredAttribute:\n same_order = True\n else:\n same_order = old_parent_id == self.parent_id\n\n if not same_order:\n changed_ids = list(filter(lambda x: x is not None, set([old_parent_id, self.parent_id])))\n else:\n changed_ids = []\n\n if not same_order and not skip_lock:\n # Lock the mptt fields for the trees of the old and new parent\n with ContentNode.objects.lock_mptt(*ContentNode.objects\n .filter(id__in=[pid for pid in [old_parent_id, self.parent_id] if pid])\n .values_list('tree_id', flat=True).distinct()):\n super(ContentNode, self).save(*args, **kwargs)\n # Always write to the database for the parent change updates, as we have\n # no persistent object references for the original and new parent to modify\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(changed=True)\n else:\n super(ContentNode, self).save(*args, **kwargs)\n # Always write to the database for the parent change updates, as we have\n # no persistent object references for the original and new parent to modify\n if changed_ids:\n ContentNode.objects.filter(id__in=changed_ids).update(changed=True)\n\n # Copied from MPTT\n save.alters_data = True\n\n def delete(self, *args, **kwargs):\n parent = self.parent or self._field_updates.changed().get('parent')\n if parent:\n parent.changed = True\n 
parent.save()\n\n self.recalculate_editors_storage()\n\n # Lock the mptt fields for the tree of this node\n with ContentNode.objects.lock_mptt(self.tree_id):\n return super(ContentNode, self).delete(*args, **kwargs)\n\n # Copied from MPTT\n delete.alters_data = True\n\n def copy_to(\n self,\n target=None,\n position=\"last-child\",\n pk=None,\n mods=None,\n excluded_descendants=None,\n can_edit_source_channel=None,\n batch_size=None,\n progress_tracker=None\n ):\n return self._tree_manager.copy_node(self, target, position, pk, mods, excluded_descendants, can_edit_source_channel, batch_size, progress_tracker)[0]\n\n def copy(self):\n return self.copy_to()\n\n def is_publishable(self):\n return self.complete and self.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists()\n\n class Meta:\n verbose_name = \"Topic\"\n verbose_name_plural = \"Topics\"\n # Do not allow two nodes with the same name on the same level\n # unique_together = ('parent', 'title')\n indexes = [\n models.Index(fields=[\"node_id\"], name=NODE_ID_INDEX_NAME),\n models.Index(fields=[\"-modified\"], name=NODE_MODIFIED_DESC_INDEX_NAME),\n ]\n\n\nclass ContentKind(models.Model):\n kind = models.CharField(primary_key=True, max_length=200, choices=content_kinds.choices)\n\n def __str__(self):\n return self.kind\n\n\nclass FileFormat(models.Model):\n extension = models.CharField(primary_key=True, max_length=40, choices=file_formats.choices)\n mimetype = models.CharField(max_length=200, blank=True)\n\n def __str__(self):\n return self.extension\n\n\nclass FormatPreset(models.Model):\n id = models.CharField(primary_key=True, max_length=150, choices=format_presets.choices)\n readable_name = models.CharField(max_length=400)\n multi_language = models.BooleanField(default=False)\n supplementary = models.BooleanField(default=False)\n thumbnail = models.BooleanField(default=False)\n subtitle = models.BooleanField(default=False)\n display = models.BooleanField(default=True) # Render on 
client side\n order = models.IntegerField(default=0)\n kind = models.ForeignKey(ContentKind, related_name='format_presets', null=True, on_delete=models.SET_NULL)\n allowed_formats = models.ManyToManyField(FileFormat, blank=True)\n\n def __str__(self):\n return self.id\n\n @classmethod\n def guess_format_preset(cls, filename):\n \"\"\"\n Guess the format preset of a filename based on its extension.\n\n Return None if format is unknown.\n \"\"\"\n\n _, ext = os.path.splitext(filename)\n ext = ext.lstrip(\".\")\n f = FormatPreset.objects.filter(\n allowed_formats__extension=ext,\n display=True\n )\n return f.first()\n\n @classmethod\n def get_preset(cls, preset_name):\n \"\"\"\n Get the FormatPreset object with that exact name.\n\n Returns None if that format preset is not found.\n \"\"\"\n try:\n return FormatPreset.objects.get(id=preset_name)\n except FormatPreset.DoesNotExist:\n return None\n\n\nclass Language(models.Model):\n id = models.CharField(max_length=14, primary_key=True)\n lang_code = models.CharField(max_length=3, db_index=True)\n lang_subcode = models.CharField(max_length=10, db_index=True, blank=True, null=True)\n readable_name = models.CharField(max_length=100, blank=True)\n native_name = models.CharField(max_length=100, blank=True)\n lang_direction = models.CharField(max_length=3, choices=languages.LANGUAGE_DIRECTIONS, default=languages.LANGUAGE_DIRECTIONS[0][0])\n\n def ietf_name(self):\n return \"{code}-{subcode}\".format(code=self.lang_code,\n subcode=self.lang_subcode) if self.lang_subcode else self.lang_code\n\n def __str__(self):\n return self.ietf_name()\n\n\nASSESSMENT_ID_INDEX_NAME = \"assessment_id_idx\"\n\n\nclass AssessmentItem(models.Model):\n type = models.CharField(max_length=50, default=\"multiplechoice\")\n question = models.TextField(blank=True)\n hints = models.TextField(default=\"[]\")\n answers = models.TextField(default=\"[]\")\n order = models.IntegerField(default=1)\n contentnode = models.ForeignKey('ContentNode', 
related_name=\"assessment_items\", blank=True, null=True,\n db_index=True, on_delete=models.CASCADE)\n # Note this field is indexed, but we are using the Index API to give it an explicit name, see the model Meta\n assessment_id = UUIDField(primary_key=False, default=uuid.uuid4, editable=False)\n raw_data = models.TextField(blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n randomize = models.BooleanField(default=False)\n deleted = models.BooleanField(default=False)\n\n objects = CustomManager()\n # Track all updates\n _field_updates = FieldTracker()\n\n def has_changes(self):\n return bool(self._field_updates.changed())\n\n class Meta:\n indexes = [\n models.Index(fields=[\"assessment_id\"], name=ASSESSMENT_ID_INDEX_NAME),\n ]\n\n unique_together = ['contentnode', 'assessment_id']\n\n _permission_filter = Q(tree_id=OuterRef(\"contentnode__tree_id\"))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n\n if not user_id:\n return queryset.none()\n\n edit_cte = PermissionCTE.editable_channels(user_id)\n\n queryset = queryset.with_cte(edit_cte).annotate(\n edit=edit_cte.exists(cls._permission_filter),\n )\n\n if user.is_admin:\n return queryset\n\n return queryset.filter(edit=True)\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n\n queryset = queryset.annotate(\n public=Exists(\n Channel.objects.filter(\n public=True, main_tree__tree_id=OuterRef(\"contentnode__tree_id\")\n ).values(\"pk\")\n ),\n )\n\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)\n\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(\n edit=edit_cte.exists(cls._permission_filter),\n view=view_cte.exists(cls._permission_filter),\n )\n\n if 
user.is_admin:\n return queryset\n\n return queryset.filter(Q(view=True) | Q(edit=True) | Q(public=True))\n\n def on_create(self):\n \"\"\"\n When an exercise is added to a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n \"\"\"\n When an exercise is updated of a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n When an exercise is deleted from a contentnode, update its content_id\n if it's a copied contentnode.\n \"\"\"\n self.contentnode.make_content_id_unique()\n return super(AssessmentItem, self).delete(*args, **kwargs)\n\n\nclass SlideshowSlide(models.Model):\n contentnode = models.ForeignKey('ContentNode', related_name=\"slideshow_slides\", blank=True, null=True,\n db_index=True, on_delete=models.CASCADE)\n sort_order = models.FloatField(default=1.0)\n metadata = JSONField(default=dict)\n\n\nclass StagedFile(models.Model):\n \"\"\"\n Keeps track of files uploaded through Ricecooker to avoid user going over disk quota limit\n \"\"\"\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='staged_files', blank=True, null=True, on_delete=models.CASCADE)\n\n\nFILE_DISTINCT_INDEX_NAME = \"file_checksum_file_size_idx\"\nFILE_MODIFIED_DESC_INDEX_NAME = \"file_modified_desc_idx\"\nFILE_DURATION_CONSTRAINT = \"file_media_duration_int\"\nMEDIA_PRESETS = [\n format_presets.AUDIO,\n format_presets.AUDIO_DEPENDENCY,\n format_presets.VIDEO_HIGH_RES,\n format_presets.VIDEO_LOW_RES,\n format_presets.VIDEO_DEPENDENCY,\n]\n\n\nclass File(models.Model):\n \"\"\"\n The bottom layer of the contentDB schema, defines the basic building brick for content.\n Things it can represent are, for example, mp4, avi, mov, html, css, jpeg, pdf, mp3...\n 
\"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n checksum = models.CharField(max_length=400, blank=True, db_index=True)\n file_size = models.IntegerField(blank=True, null=True)\n file_on_disk = models.FileField(upload_to=object_storage_name, storage=default_storage, max_length=500,\n blank=True)\n contentnode = models.ForeignKey(ContentNode, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n assessment_item = models.ForeignKey(AssessmentItem, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n slideshow_slide = models.ForeignKey(SlideshowSlide, related_name='files', blank=True, null=True, db_index=True, on_delete=models.CASCADE)\n file_format = models.ForeignKey(FileFormat, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)\n preset = models.ForeignKey(FormatPreset, related_name='files', blank=True, null=True, db_index=True, on_delete=models.SET_NULL)\n language = models.ForeignKey(Language, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)\n original_filename = models.CharField(max_length=255, blank=True)\n source_url = models.CharField(max_length=400, blank=True, null=True)\n uploaded_by = models.ForeignKey(User, related_name='files', blank=True, null=True, on_delete=models.SET_NULL)\n\n modified = models.DateTimeField(auto_now=True, verbose_name=\"modified\", null=True)\n duration = models.IntegerField(blank=True, null=True)\n\n objects = CustomManager()\n\n _permission_filter = Q(tree_id=OuterRef(\"contentnode__tree_id\")) | Q(tree_id=OuterRef(\"assessment_item__contentnode__tree_id\"))\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n\n if not user_id:\n return queryset.none()\n\n cte = PermissionCTE.editable_channels(user_id)\n queryset = queryset.with_cte(cte).annotate(edit=cte.exists(cls._permission_filter))\n\n if user.is_admin:\n return queryset\n\n 
return queryset.filter(\n Q(edit=True) | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)\n )\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n user_id = not user.is_anonymous and user.id\n\n queryset = queryset.annotate(\n public=Exists(\n Channel.objects.filter(public=True).filter(\n Q(main_tree__tree_id=OuterRef(\"contentnode__tree_id\"))\n | Q(main_tree__tree_id=OuterRef(\"assessment_item__contentnode__tree_id\"))\n ).values(\"pk\")\n ),\n )\n\n if not user_id:\n return queryset.annotate(edit=boolean_val(False), view=boolean_val(False)).filter(public=True)\n\n edit_cte = PermissionCTE.editable_channels(user_id)\n view_cte = PermissionCTE.view_only_channels(user_id)\n\n queryset = queryset.with_cte(edit_cte).with_cte(view_cte).annotate(\n edit=edit_cte.exists(cls._permission_filter),\n view=view_cte.exists(cls._permission_filter),\n )\n\n if user.is_admin:\n return queryset\n\n return queryset.filter(\n Q(view=True)\n | Q(edit=True)\n | Q(public=True)\n | Q(uploaded_by=user, contentnode__isnull=True, assessment_item__isnull=True)\n )\n\n class Admin:\n pass\n\n def __str__(self):\n return '{checksum}{extension}'.format(checksum=self.checksum, extension='.' + self.file_format.extension)\n\n def filename(self):\n \"\"\"\n Returns just the filename of the File in storage, without the path\n\n e.g. 
abcd.mp4\n \"\"\"\n # TODO(aron): write tests for this\n\n return os.path.basename(self.file_on_disk.name)\n\n def update_contentnode_content_id(self):\n \"\"\"\n If the file is attached to a contentnode and is not a thumbnail\n then update that contentnode's content_id if it's a copied contentnode.\n \"\"\"\n if self.contentnode and self.preset.thumbnail is False:\n self.contentnode.make_content_id_unique()\n\n def on_update(self):\n # since modified was added later as a nullable field to File, we don't use a default but\n # instead we'll just make sure it's always updated through our serializers\n self.modified = timezone.now()\n self.update_contentnode_content_id()\n\n def save(self, set_by_file_on_disk=True, *args, **kwargs):\n \"\"\"\n Overrider the default save method.\n If the file_on_disk FileField gets passed a content copy:\n 1. generate the MD5 from the content copy\n 2. fill the other fields accordingly\n \"\"\"\n from contentcuration.utils.user import calculate_user_storage\n\n # check if the file format exists in file_formats.choices\n if self.file_format_id:\n if self.file_format_id not in dict(file_formats.choices):\n raise ValidationError(\"Invalid file_format\")\n\n if set_by_file_on_disk and self.file_on_disk: # if file_on_disk is supplied, hash out the file\n if self.checksum is None or self.checksum == \"\":\n md5 = hashlib.md5()\n for chunk in self.file_on_disk.chunks():\n md5.update(chunk)\n\n self.checksum = md5.hexdigest()\n if not self.file_size:\n self.file_size = self.file_on_disk.size\n if not self.file_format_id:\n ext = os.path.splitext(self.file_on_disk.name)[1].lstrip('.')\n if ext in list(dict(file_formats.choices).keys()):\n self.file_format_id = ext\n else:\n raise ValueError(\"Files of type `{}` are not supported.\".format(ext))\n\n super(File, self).save(*args, **kwargs)\n\n if self.uploaded_by_id:\n calculate_user_storage(self.uploaded_by_id)\n\n class Meta:\n indexes = [\n models.Index(fields=['checksum', 'file_size'], 
name=FILE_DISTINCT_INDEX_NAME),\n models.Index(fields=[\"-modified\"], name=FILE_MODIFIED_DESC_INDEX_NAME),\n ]\n constraints = [\n # enforces that duration is null when not a media preset, but the duration may be null for media presets\n # but if not-null, should be greater than 0\n models.CheckConstraint(\n check=(Q(preset__in=MEDIA_PRESETS, duration__gt=0) | Q(duration__isnull=True)),\n name=FILE_DURATION_CONSTRAINT\n )\n ]\n\n\n@receiver(models.signals.post_delete, sender=File)\ndef auto_delete_file_on_delete(sender, instance, **kwargs):\n \"\"\"\n Deletes file from filesystem if no other File objects are referencing the same file on disk\n when corresponding `File` object is deleted.\n Be careful! we don't know if this will work when perform bash delete on File obejcts.\n \"\"\"\n # Recalculate storage\n from contentcuration.utils.user import calculate_user_storage\n if instance.uploaded_by_id:\n calculate_user_storage(instance.uploaded_by_id)\n\n\ndef delete_empty_file_reference(checksum, extension):\n filename = checksum + '.' 
+ extension\n if not File.objects.filter(checksum=checksum).exists() and not Channel.objects.filter(thumbnail=filename).exists():\n storage_path = generate_object_storage_name(checksum, filename)\n if default_storage.exists(storage_path):\n default_storage.delete(storage_path)\n\n\nclass PrerequisiteContentRelationship(models.Model):\n \"\"\"\n Predefine the prerequisite relationship between two ContentNode objects.\n \"\"\"\n target_node = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_target_node', on_delete=models.CASCADE)\n prerequisite = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_prerequisite', on_delete=models.CASCADE)\n\n class Meta:\n unique_together = ['target_node', 'prerequisite']\n\n def clean(self, *args, **kwargs):\n # self reference exception\n if self.target_node == self.prerequisite:\n raise IntegrityError('Cannot self reference as prerequisite.')\n # immediate cyclic exception\n if PrerequisiteContentRelationship.objects.using(self._state.db) \\\n .filter(target_node=self.prerequisite, prerequisite=self.target_node):\n raise IntegrityError(\n 'Note: Prerequisite relationship is directional! %s and %s cannot be prerequisite of each other!'\n % (self.target_node, self.prerequisite))\n # distant cyclic exception\n # elif <this is a nice to have exception, may implement in the future when the priority raises.>\n # raise Exception('Note: Prerequisite relationship is acyclic! %s and %s forms a closed loop!' 
% (\n # self.target_node, self.prerequisite\n # ))\n super(PrerequisiteContentRelationship, self).clean(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.full_clean()\n super(PrerequisiteContentRelationship, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s' % (self.pk)\n\n\nclass RelatedContentRelationship(models.Model):\n \"\"\"\n Predefine the related relationship between two ContentNode objects.\n \"\"\"\n contentnode_1 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_1', on_delete=models.CASCADE)\n contentnode_2 = models.ForeignKey(ContentNode, related_name='%(app_label)s_%(class)s_2', on_delete=models.CASCADE)\n\n class Meta:\n unique_together = ['contentnode_1', 'contentnode_2']\n\n def save(self, *args, **kwargs):\n # self reference exception\n if self.contentnode_1 == self.contentnode_2:\n raise IntegrityError('Cannot self reference as related.')\n # handle immediate cyclic\n if RelatedContentRelationship.objects.using(self._state.db) \\\n .filter(contentnode_1=self.contentnode_2, contentnode_2=self.contentnode_1):\n return # silently cancel the save\n super(RelatedContentRelationship, self).save(*args, **kwargs)\n\n\nclass Invitation(models.Model):\n \"\"\" Invitation to edit channel \"\"\"\n id = UUIDField(primary_key=True, default=uuid.uuid4)\n accepted = models.BooleanField(default=False)\n declined = models.BooleanField(default=False)\n revoked = models.BooleanField(default=False)\n invited = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, related_name='sent_to')\n share_mode = models.CharField(max_length=50, default=EDIT_ACCESS)\n email = models.EmailField(max_length=100, null=True)\n sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='sent_by', null=True, on_delete=models.CASCADE)\n channel = models.ForeignKey('Channel', null=True, related_name='pending_editors', on_delete=models.CASCADE)\n first_name = models.CharField(max_length=100, blank=True)\n 
last_name = models.CharField(max_length=100, blank=True, null=True)\n\n class Meta:\n verbose_name = \"Invitation\"\n verbose_name_plural = \"Invitations\"\n\n def accept(self):\n user = User.objects.filter(email__iexact=self.email).first()\n if self.channel:\n # channel is a nullable field, so check that it exists.\n if self.share_mode == VIEW_ACCESS:\n self.channel.editors.remove(user)\n self.channel.viewers.add(user)\n else:\n self.channel.viewers.remove(user)\n self.channel.editors.add(user)\n\n @classmethod\n def filter_edit_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n\n if user.is_admin:\n return queryset\n\n return queryset.filter(\n Q(email__iexact=user.email)\n | Q(sender=user)\n | Q(channel__editors=user)\n ).distinct()\n\n @classmethod\n def filter_view_queryset(cls, queryset, user):\n if user.is_anonymous:\n return queryset.none()\n\n if user.is_admin:\n return queryset\n return queryset.filter(\n Q(email__iexact=user.email)\n | Q(sender=user)\n | Q(channel__editors=user)\n | Q(channel__viewers=user)\n ).distinct()\n\n\nclass Change(models.Model):\n server_rev = models.BigAutoField(primary_key=True)\n # We need to store the user who is applying this change\n # so that we can validate they have permissions to do so\n # allow to be null so that we don't lose changes if a user\n # account is hard deleted.\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.SET_NULL, related_name=\"changes_by_user\")\n # Almost all changes are related to channels, but some are specific only to users\n # so we allow this to be nullable for these edge cases.\n # Indexed by default because it's a ForeignKey field.\n channel = models.ForeignKey(Channel, null=True, blank=True, on_delete=models.CASCADE)\n # For those changes related to users, store a user value instead of channel\n # this may be different to created_by, as changes to invitations affect individual users.\n # Indexed by default 
because it's a ForeignKey field.\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE, related_name=\"changes_about_user\")\n # Use client_rev to keep track of changes coming from the client side\n # but let it be blank or null for changes we generate on the server side\n client_rev = models.IntegerField(null=True, blank=True)\n # client_rev numbers are by session, we add the session key here for bookkeeping\n # to allow a check within the same session to return whether a change has been applied\n # or not, and hence remove it from the frontend\n session = models.ForeignKey(Session, null=True, blank=True, on_delete=models.SET_NULL)\n table = models.CharField(max_length=32)\n change_type = models.IntegerField()\n # Use the DRF JSONEncoder class as the encoder here\n # so that we can handle anything that has been deserialized by DRF\n # or that will be later be serialized by DRF\n kwargs = JSONField(encoder=JSONEncoder)\n applied = models.BooleanField(default=False)\n errored = models.BooleanField(default=False)\n\n @classmethod\n def _create_from_change(cls, created_by_id=None, channel_id=None, user_id=None, session_key=None, applied=False, table=None, rev=None, **data):\n change_type = data.pop(\"type\")\n if table is None or table not in ALL_TABLES:\n raise TypeError(\"table is a required argument for creating changes and must be a valid table name\")\n if change_type is None or change_type not in ALL_CHANGES:\n raise TypeError(\"change_type is a required argument for creating changes and must be a valid change type integer\")\n return cls(\n session_id=session_key,\n created_by_id=created_by_id,\n channel_id=channel_id,\n user_id=user_id,\n client_rev=rev,\n table=table,\n change_type=change_type,\n kwargs=data,\n applied=applied\n )\n\n @classmethod\n def create_changes(cls, changes, created_by_id=None, session_key=None, applied=False):\n change_models = []\n for change in changes:\n 
change_models.append(cls._create_from_change(created_by_id=created_by_id, session_key=session_key, applied=applied, **change))\n\n cls.objects.bulk_create(change_models)\n return change_models\n\n @classmethod\n def create_change(cls, change, created_by_id=None, session_key=None, applied=False):\n obj = cls._create_from_change(created_by_id=created_by_id, session_key=session_key, applied=applied, **change)\n obj.save()\n return obj\n\n @classmethod\n def serialize(cls, change):\n datum = get_attribute(change, [\"kwargs\"]).copy()\n datum.update({\n \"server_rev\": get_attribute(change, [\"server_rev\"]),\n \"table\": get_attribute(change, [\"table\"]),\n \"type\": get_attribute(change, [\"change_type\"]),\n \"channel_id\": get_attribute(change, [\"channel_id\"]),\n \"user_id\": get_attribute(change, [\"user_id\"]),\n \"created_by_id\": get_attribute(change, [\"created_by_id\"])\n })\n return datum\n\n def serialize_to_change_dict(self):\n return self.serialize(self)\n\n\nclass TaskResultCustom(object):\n \"\"\"\n Custom fields to add to django_celery_results's TaskResult model\n\n If adding fields to this class, run `makemigrations` then move the generated migration from the\n `django_celery_results` app to the `contentcuration` app and override the constructor to change\n the app_label. 
See `0141_add_task_signature` for an example\n \"\"\"\n # user shouldn't be null, but in order to append the field, this needs to be allowed\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\"tasks\", on_delete=models.CASCADE, null=True)\n channel_id = DjangoUUIDField(db_index=True, null=True, blank=True)\n progress = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(0), MaxValueValidator(100)])\n # a hash of the task name and kwargs for identifying repeat tasks\n signature = models.CharField(null=True, blank=False, max_length=32)\n\n super_as_dict = TaskResult.as_dict\n\n def as_dict(self):\n \"\"\"\n :return: A dictionary representation\n \"\"\"\n super_dict = self.super_as_dict()\n super_dict.update(\n user_id=self.user_id,\n channel_id=self.channel_id,\n progress=self.progress,\n )\n return super_dict\n\n @classmethod\n def contribute_to_class(cls, model_class=TaskResult):\n \"\"\"\n Adds fields to model, by default TaskResult\n :param model_class: TaskResult model\n \"\"\"\n for field in dir(cls):\n if not field.startswith(\"_\") and field not in ('contribute_to_class', 'Meta'):\n model_class.add_to_class(field, getattr(cls, field))\n\n # manually add Meta afterwards\n setattr(model_class._meta, 'indexes', getattr(model_class._meta, 'indexes', []) + cls.Meta.indexes)\n\n class Meta:\n indexes = [\n # add index that matches query usage for signature\n models.Index(\n fields=['signature'],\n name='task_result_signature_idx',\n condition=Q(status__in=celery_states.UNREADY_STATES),\n ),\n ]\n\n\n# trigger class contributions immediately\nTaskResultCustom.contribute_to_class()\n",
"step-ids": [
65,
102,
158,
169,
216
]
}
|
[
65,
102,
158,
169,
216
] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .authority import *
from .ca_pool import *
from .ca_pool_iam_binding import *
from .ca_pool_iam_member import *
from .ca_pool_iam_policy import *
from .certificate import *
from .certificate_template import *
from .certificate_template_iam_binding import *
from .certificate_template_iam_member import *
from .certificate_template_iam_policy import *
from .get_authority import *
from .get_ca_pool_iam_policy import *
from .get_certificate_template_iam_policy import *
from ._inputs import *
from . import outputs
|
normal
|
{
"blob_id": "4ca4d4bd684802b056417be4ee3d7d10e8f5dc85",
"index": 8842,
"step-1": "<mask token>\n",
"step-2": "from .. import _utilities\nimport typing\nfrom .authority import *\nfrom .ca_pool import *\nfrom .ca_pool_iam_binding import *\nfrom .ca_pool_iam_member import *\nfrom .ca_pool_iam_policy import *\nfrom .certificate import *\nfrom .certificate_template import *\nfrom .certificate_template_iam_binding import *\nfrom .certificate_template_iam_member import *\nfrom .certificate_template_iam_policy import *\nfrom .get_authority import *\nfrom .get_ca_pool_iam_policy import *\nfrom .get_certificate_template_iam_policy import *\nfrom ._inputs import *\nfrom . import outputs\n",
"step-3": "# coding=utf-8\n# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nfrom .. import _utilities\nimport typing\n# Export this package's modules as members:\nfrom .authority import *\nfrom .ca_pool import *\nfrom .ca_pool_iam_binding import *\nfrom .ca_pool_iam_member import *\nfrom .ca_pool_iam_policy import *\nfrom .certificate import *\nfrom .certificate_template import *\nfrom .certificate_template_iam_binding import *\nfrom .certificate_template_iam_member import *\nfrom .certificate_template_iam_policy import *\nfrom .get_authority import *\nfrom .get_ca_pool_iam_policy import *\nfrom .get_certificate_template_iam_policy import *\nfrom ._inputs import *\nfrom . import outputs\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def get_bios_boot_order(self):
    """Return the BIOS/UEFI boot device order of the managed system.

    Performs three GETs against the service: the system resource (to
    locate the Bios sub-resource via its ``@odata.id``), the Bios
    resource (to read the active ``BootMode`` attribute), and the
    ``BootSources`` resource (to read the ordered device list for that
    mode).

    Returns:
        dict: ``{'ret': True, 'entries': [...]}`` on success, where each
        entry is a dict with ``Index``, ``Name`` and ``Enabled`` keys.
        On any failed GET the failing response dict is returned as-is.
    """
    systems_response = self.get_request(self.root_uri + self.systems_uri)
    if systems_response['ret'] is False:
        return systems_response
    # The system resource advertises where its Bios resource lives.
    bios_uri = systems_response['data']['Bios']['@odata.id']

    bios_response = self.get_request(self.root_uri + bios_uri)
    if bios_response['ret'] is False:
        return bios_response
    # The attribute that holds the boot sequence depends on the mode.
    boot_mode = bios_response['data']['Attributes']['BootMode']
    boot_seq = 'UefiBootSeq' if boot_mode == 'Uefi' else 'BootSeq'

    sources_response = self.get_request(
        self.root_uri + self.systems_uri + '/' + 'BootSources')
    if sources_response['ret'] is False:
        return sources_response

    # Keep only the fields callers care about for each boot device.
    entries = [
        {'Index': device['Index'],
         'Name': device['Name'],
         'Enabled': device['Enabled']}
        for device in sources_response['data']['Attributes'][boot_seq]
    ]
    return {'ret': True, 'entries': entries}
<|reserved_special_token_1|>
def get_bios_boot_order(self):
    """Return the BIOS/UEFI boot device order of the managed system.

    Walks the service's resource tree in three GETs: the system resource
    (to find the Bios resource URI via its ``@odata.id``), the Bios
    resource (to read the active ``BootMode`` attribute), and the
    ``BootSources`` resource (to read the ordered device list for that
    mode).

    Returns:
        dict: ``{'ret': True, 'entries': [...]}`` on success, where each
        entry is a dict with ``Index``, ``Name`` and ``Enabled`` keys.
        On any failed GET the failing response dict is returned as-is.
    """
    result = {
    }
    boot_device_list = []
    boot_device_details = []
    key = 'Bios'
    bootsources = 'BootSources'
    # Fetch the system resource to locate the Bios sub-resource.
    response = self.get_request((self.root_uri + self.systems_uri))
    if (response['ret'] is False):
        return response
    result['ret'] = True
    data = response['data']
    bios_uri = data[key]['@odata.id']
    # Fetch the Bios resource; its BootMode attribute decides which
    # boot-sequence attribute holds the ordered device list.
    response = self.get_request((self.root_uri + bios_uri))
    if (response['ret'] is False):
        return response
    data = response['data']
    boot_mode = data['Attributes']['BootMode']
    if (boot_mode == 'Uefi'):
        boot_seq = 'UefiBootSeq'
    else:
        boot_seq = 'BootSeq'
    # Fetch the BootSources resource holding the actual device list.
    response = self.get_request((((self.root_uri + self.systems_uri) + '/') + bootsources))
    if (response['ret'] is False):
        return response
    result['ret'] = True
    data = response['data']
    boot_device_list = data['Attributes'][boot_seq]
    # Keep only the fields callers care about for each boot device.
    for b in boot_device_list:
        boot_device = {
        }
        boot_device['Index'] = b['Index']
        boot_device['Name'] = b['Name']
        boot_device['Enabled'] = b['Enabled']
        boot_device_details.append(boot_device)
    result['entries'] = boot_device_details
    return result
|
flexible
|
{
"blob_id": "bbe7df31a44ccf51c305cd620dc7c4155b7e1a97",
"index": 2668,
"step-1": "<mask token>\n",
"step-2": "def get_bios_boot_order(self):\n result = {}\n boot_device_list = []\n boot_device_details = []\n key = 'Bios'\n bootsources = 'BootSources'\n response = self.get_request(self.root_uri + self.systems_uri)\n if response['ret'] is False:\n return response\n result['ret'] = True\n data = response['data']\n bios_uri = data[key]['@odata.id']\n response = self.get_request(self.root_uri + bios_uri)\n if response['ret'] is False:\n return response\n data = response['data']\n boot_mode = data['Attributes']['BootMode']\n if boot_mode == 'Uefi':\n boot_seq = 'UefiBootSeq'\n else:\n boot_seq = 'BootSeq'\n response = self.get_request(self.root_uri + self.systems_uri + '/' +\n bootsources)\n if response['ret'] is False:\n return response\n result['ret'] = True\n data = response['data']\n boot_device_list = data['Attributes'][boot_seq]\n for b in boot_device_list:\n boot_device = {}\n boot_device['Index'] = b['Index']\n boot_device['Name'] = b['Name']\n boot_device['Enabled'] = b['Enabled']\n boot_device_details.append(boot_device)\n result['entries'] = boot_device_details\n return result\n",
"step-3": "def get_bios_boot_order(self):\n result = {\n \n }\n boot_device_list = []\n boot_device_details = []\n key = 'Bios'\n bootsources = 'BootSources'\n response = self.get_request((self.root_uri + self.systems_uri))\n if (response['ret'] is False):\n return response\n result['ret'] = True\n data = response['data']\n bios_uri = data[key]['@odata.id']\n response = self.get_request((self.root_uri + bios_uri))\n if (response['ret'] is False):\n return response\n data = response['data']\n boot_mode = data['Attributes']['BootMode']\n if (boot_mode == 'Uefi'):\n boot_seq = 'UefiBootSeq'\n else:\n boot_seq = 'BootSeq'\n response = self.get_request((((self.root_uri + self.systems_uri) + '/') + bootsources))\n if (response['ret'] is False):\n return response\n result['ret'] = True\n data = response['data']\n boot_device_list = data['Attributes'][boot_seq]\n for b in boot_device_list:\n boot_device = {\n \n }\n boot_device['Index'] = b['Index']\n boot_device['Name'] = b['Name']\n boot_device['Enabled'] = b['Enabled']\n boot_device_details.append(boot_device)\n result['entries'] = boot_device_details\n return result",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 18:50:46 2019
@author: kanfar
"""
import numpy as np
import timeit
import matplotlib.pyplot as plt
from numpy import expand_dims, zeros, ones
from numpy.random import randn, randint
from keras.models import load_model
from keras.optimizers import Adam
from keras.models import Model
from keras.layers import Input, Reshape, Flatten, Concatenate
from keras.layers import Dense, Conv2D, Conv2DTranspose
from keras.layers import Dropout, LeakyReLU
class cGAN:
    """Conditional GAN that maps defocused images to focused images.

    The generator receives a latent vector plus a defocused (conditional)
    image and emits a candidate focused image; the discriminator scores
    (image, condition) pairs as real or fake.  Built with the Keras
    functional API imported at module level.
    """

    def __init__(self, input_dim1, input_dim2, input_dim3, latent_size):
        # Image dimensions (presumably height, width, channels — verify
        # against callers) and the size of the latent noise vector.
        self.input_dim1 = input_dim1
        self.input_dim2 = input_dim2
        self.input_dim3 = input_dim3
        self.latent_size = latent_size

    def discriminator(self):
        """Build and compile the binary real/fake discriminator.

        Returns:
            A compiled Keras Model taking ``[candidate_image,
            conditional_image]`` and outputting a sigmoid probability.
        """
        # conditional (defocused) input
        input_shape = (self.input_dim1, self.input_dim2, self.input_dim3)
        input_cond = Input(shape = input_shape)
        # candidate image: a real sample or a generator output
        input_x = Input(shape = input_shape)
        merge = Concatenate()([input_x, input_cond])
        # downsample twice with strided 3x3 convolutions
        out = Conv2D(32, (3,3), strides=(2,2), padding='same')(merge)
        out = LeakyReLU(alpha=0.2)(out)
        out = Conv2D(32, (3,3), strides=(2,2), padding='same')(out)
        out = LeakyReLU(alpha=0.2)(out)
        out = Flatten()(out)
        out = Dropout(0.5)(out)
        y = Dense(1, activation='sigmoid')(out)
        # define model
        model = Model([input_x, input_cond], y)
        # compile model
        # NOTE(review): original comment suggested beta_1=0.5 (as used in
        # combined()) but it is not set here — confirm whether intended.
        opt = Adam(lr=0.0002)
        model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
        return model

    def generator(self):
        """Build (but do not compile) the conditional generator.

        The latent vector is projected to an image-sized plane, stacked
        with the conditional image, and refined by stride-1 convolutions
        down to a single-channel tanh output.
        """
        # TODO: original author's note — "losing one pixel, figure out later"
        image_dim = self.input_dim1
        latent_shape = self.latent_size
        cond_shape = (image_dim, image_dim, self.input_dim3)
        input_latent = Input(shape = (latent_shape,))
        # project the latent vector onto one image-sized feature plane
        num_nodes = image_dim * image_dim
        latent = Dense(num_nodes)(input_latent)
        latent = LeakyReLU(alpha=0.2)(latent)
        latent = Reshape((image_dim,image_dim,1))(latent)
        input_cond = Input(shape = cond_shape)
        cond = input_cond
        merge = Concatenate()([latent,cond])
        # stride-1 4x4 convolutions: spatial size is preserved (these do
        # not upsample despite the original comments)
        out = Conv2D(32, (4,4), strides=(1,1), padding='same')(merge)
        out = LeakyReLU(alpha=0.2)(out)
        out = Conv2D(32, (4,4), strides=(1,1), padding='same')(out)
        out = LeakyReLU(alpha=0.2)(out)
        out = Conv2D(32, (4,4), strides=(1,1), padding='same')(out)
        out = LeakyReLU(alpha=0.2)(out)
        # single-channel output; tanh keeps pixel values in [-1, 1]
        x = Conv2D(1, (4,4), strides=(1,1), activation='tanh', padding='same')(out)
        # define model
        model = Model([input_latent, input_cond], x)
        return model

    def combined(self, g_model, d_model):
        """Stack generator and (frozen) discriminator into the GAN model.

        Args:
            g_model: generator from :meth:`generator`.
            d_model: compiled discriminator from :meth:`discriminator`.

        Returns:
            A compiled Model mapping ``[latent, condition]`` to the
            discriminator's real/fake score, used to train the generator.
        """
        # freeze discriminator weights while training the generator
        d_model.trainable = False
        # reuse the generator's own input tensors as the GAN inputs
        input_latent, input_cond = g_model.input
        # the generator's output image feeds the discriminator
        x = g_model.output
        # connect generated image + condition as discriminator inputs
        y = d_model([x, input_cond])
        # GAN model: latent + condition in, classification out
        model = Model([input_latent, input_cond], y)
        # compile model
        opt = Adam(lr=0.0002, beta_1=0.5)
        model.compile(loss='binary_crossentropy', optimizer=opt)
        return model

    def generate_real_samples(self, focused, defocused, n_samples):
        """Sample matching (focused, defocused) pairs labeled as real (1)."""
        idx = randint(0, focused.shape[0], n_samples)
        # the same indices keep each focused image paired with its condition
        x_real, input_cond = focused[idx,:,:,:], defocused[idx,:,:,:]
        y_real = ones((n_samples,1))
        return [x_real, input_cond], y_real

    def generate_latent(self, latent_size, n_samples):
        """Draw ``n_samples`` standard-normal latent vectors of ``latent_size``."""
        # generate points in the latent space
        total_latent = randn(latent_size*n_samples)
        input_z = total_latent.reshape(n_samples, latent_size)
        return input_z

    def generate_fake_samples(self, generator, defocused, latent_dim, n_samples):
        """Generate fake images from random conditions, labeled fake (0)."""
        idx = randint(0, defocused.shape[0], n_samples)
        input_cond = defocused[idx,:,:,:]
        input_z = self.generate_latent(latent_dim, n_samples)
        # predict outputs
        x_fake = generator.predict([input_z, input_cond])
        # create class labels
        y_fake = zeros((n_samples, 1))
        return [x_fake, input_cond], y_fake

    def generate_gan_input(self, defocused, latent_dim, n_samples):
        """Build generator-training input: latent + condition, labeled 1.

        Labels are ones so the generator is pushed to fool the (frozen)
        discriminator.
        """
        idx = randint(0, defocused.shape[0], n_samples)
        input_cond = defocused[idx,:,:,:]
        input_z = self.generate_latent(latent_dim, n_samples)
        # create class labels
        y_gan = ones((n_samples, 1))
        return [input_z, input_cond], y_gan

    def train(self, g_model, d_model, gan_model, real, input_cond, latent_dim, n_epochs, n_batch, save):
        """Run the alternating GAN training loop and persist the results.

        Per batch: train the discriminator on a real half-batch, then on a
        fake half-batch, then train the generator through ``gan_model``.
        Saves the generator to ``./models/cgan_<save>.h5`` and the loss
        history to ``./models/cgan_loss_<save>.npy``.

        NOTE(review): the per-epoch loss arrays are overwritten on every
        batch, so only the last batch of each epoch is recorded — confirm
        whether an epoch average was intended.
        """
        bat_per_epo = int(real.shape[0] / n_batch) #check
        half_batch = int(n_batch / 2)
        g_loss = np.zeros(n_epochs)
        d_loss_real = np.zeros(n_epochs)
        d_loss_fake = np.zeros(n_epochs)
        # manually enumerate epochs
        for i in range(n_epochs):
            start = timeit.default_timer()
            # enumerate batches over the training set
            print('================== Epoch %d ==================\n' % (i+1))
            for j in range(bat_per_epo):
                # get randomly selected 'real' samples
                [x_real, input_cond_real], y_real = self.generate_real_samples(real, input_cond, half_batch)
                # update discriminator model weights
                d_loss_real[i], _ = d_model.train_on_batch([x_real, input_cond_real], y_real)
                # generate 'fake' examples
                [x_fake, input_cond_fake], y_fake = self.generate_fake_samples(g_model, input_cond, latent_dim, half_batch)
                # update discriminator model weights
                d_loss_fake[i], _ = d_model.train_on_batch([x_fake, input_cond_fake], y_fake)
                # prepare points in latent space as input for the generator
                [z_input, input_cond_gan], y_gan = self.generate_gan_input(input_cond, latent_dim, n_batch)
                # update the generator via the discriminator's error
                g_loss[i] = gan_model.train_on_batch([z_input, input_cond_gan], y_gan)
                # summarize loss on this batch
                print('Completed: %.f' % np.divide((j+1)*100,bat_per_epo) +'%')
            print('Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f' %
                  (i+1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\n')
            stop = timeit.default_timer()
            print('Time: %.2f min' % ((stop - start)/60))
        # save the generator model
        g_model.save('./models/cgan_'+ save + '.h5')
        # save loss history
        loss = np.array([d_loss_real, d_loss_fake, g_loss])
        np.save('./models/cgan_loss_' + save, loss)

    def generate_fakes_givenOne(self, generator, focused, defocused, latent_dim, n_samples):
        """For one random condition, generate ``n_samples`` fake images.

        Returns:
            tuple: ``(x_real, x_fake, condition)`` — the matching real
            image (with a leading batch axis of 1), the generated batch,
            and the single conditional image.
        """
        idx = randint(0, defocused.shape[0], 1)
        x_real = focused[idx,:,:,:]
        # NOTE(review): author was unsure whether the last axis should be
        # 0 or ':' here — confirm against downstream usage.
        input_cond = defocused[idx,:,:,:]
        # repeat the single condition so every latent sample shares it
        input_cond = np.repeat(input_cond, n_samples, axis=0)
        input_z = self.generate_latent(latent_dim, n_samples)
        x_fake = generator.predict([input_z, input_cond])
        return x_real, x_fake, input_cond[0,:,:,:]

    def generate_fakes_givenMany(self, generator, focused, defocused, latent_dim, n_examples):
        """Generate fake batches for ``n_examples`` random conditions.

        Each example produces ``n_examples - 2`` fakes (presumably sized
        to fit a plotting grid — verify).  Returns arrays of the real
        images, the fakes, and the conditions, indexed by example.
        """
        n_samples = n_examples-2
        x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape[2], focused.shape[3]))
        input_cond_many = np.zeros((n_examples, focused.shape[1], focused.shape[2], focused.shape[3]))
        x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1], focused.shape[2], focused.shape[3]))
        for i in range(n_examples):
            x_real_many[i,:,:,:], x_fake_many[i,:,:,:,:], input_cond_many[i,:,:,:] = self.generate_fakes_givenOne(generator, focused, defocused, latent_dim, n_samples)
        return x_real_many, x_fake_many, input_cond_many
|
normal
|
{
"blob_id": "fc6c220f8a3a0e9dd1d6e6e1ca131136db8f8a58",
"index": 9155,
"step-1": "<mask token>\n\n\nclass cGAN:\n\n def __init__(self, input_dim1, input_dim2, input_dim3, latent_size):\n self.input_dim1 = input_dim1\n self.input_dim2 = input_dim2\n self.input_dim3 = input_dim3\n self.latent_size = latent_size\n\n def discriminator(self):\n input_shape = self.input_dim1, self.input_dim2, self.input_dim3\n input_cond = Input(shape=input_shape)\n input_x = Input(shape=input_shape)\n merge = Concatenate()([input_x, input_cond])\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Flatten()(out)\n out = Dropout(0.5)(out)\n y = Dense(1, activation='sigmoid')(out)\n model = Model([input_x, input_cond], y)\n opt = Adam(lr=0.0002)\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[\n 'accuracy'])\n return model\n\n def generator(self):\n image_dim = self.input_dim1\n latent_shape = self.latent_size\n cond_shape = image_dim, image_dim, self.input_dim3\n input_latent = Input(shape=(latent_shape,))\n num_nodes = image_dim * image_dim\n latent = Dense(num_nodes)(input_latent)\n latent = LeakyReLU(alpha=0.2)(latent)\n latent = Reshape((image_dim, image_dim, 1))(latent)\n input_cond = Input(shape=cond_shape)\n cond = input_cond\n merge = Concatenate()([latent, cond])\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n x = Conv2D(1, (4, 4), strides=(1, 1), activation='tanh', padding='same'\n )(out)\n model = Model([input_latent, input_cond], x)\n return model\n\n def combined(self, g_model, d_model):\n d_model.trainable = False\n input_latent, input_cond = g_model.input\n x = g_model.output\n y = d_model([x, input_cond])\n model = 
Model([input_latent, input_cond], y)\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt)\n return model\n\n def generate_real_samples(self, focused, defocused, n_samples):\n idx = randint(0, focused.shape[0], n_samples)\n x_real, input_cond = focused[idx, :, :, :], defocused[idx, :, :, :]\n y_real = ones((n_samples, 1))\n return [x_real, input_cond], y_real\n\n def generate_latent(self, latent_size, n_samples):\n total_latent = randn(latent_size * n_samples)\n input_z = total_latent.reshape(n_samples, latent_size)\n return input_z\n\n def generate_fake_samples(self, generator, defocused, latent_dim, n_samples\n ):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx, :, :, :]\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n y_fake = zeros((n_samples, 1))\n return [x_fake, input_cond], y_fake\n <mask token>\n\n def train(self, g_model, d_model, gan_model, real, input_cond,\n latent_dim, n_epochs, n_batch, save):\n bat_per_epo = int(real.shape[0] / n_batch)\n half_batch = int(n_batch / 2)\n g_loss = np.zeros(n_epochs)\n d_loss_real = np.zeros(n_epochs)\n d_loss_fake = np.zeros(n_epochs)\n for i in range(n_epochs):\n start = timeit.default_timer()\n print('================== Epoch %d ==================\\n' % (i + 1))\n for j in range(bat_per_epo):\n [x_real, input_cond_real], y_real = self.generate_real_samples(\n real, input_cond, half_batch)\n d_loss_real[i], _ = d_model.train_on_batch([x_real,\n input_cond_real], y_real)\n [x_fake, input_cond_fake], y_fake = self.generate_fake_samples(\n g_model, input_cond, latent_dim, half_batch)\n d_loss_fake[i], _ = d_model.train_on_batch([x_fake,\n input_cond_fake], y_fake)\n [z_input, input_cond_gan], y_gan = self.generate_gan_input(\n input_cond, latent_dim, n_batch)\n g_loss[i] = gan_model.train_on_batch([z_input,\n input_cond_gan], y_gan)\n print('Completed: %.f' % np.divide((j + 1) * 100,\n 
bat_per_epo) + '%')\n print(\n 'Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f'\n % (i + 1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\\n')\n stop = timeit.default_timer()\n print('Time: %.2f min' % ((stop - start) / 60))\n g_model.save('./models/cgan_' + save + '.h5')\n loss = np.array([d_loss_real, d_loss_fake, g_loss])\n np.save('./models/cgan_loss_' + save, loss)\n\n def generate_fakes_givenOne(self, generator, focused, defocused,\n latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], 1)\n x_real = focused[idx, :, :, :]\n input_cond = defocused[idx, :, :, :]\n input_cond = np.repeat(input_cond, n_samples, axis=0)\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n return x_real, x_fake, input_cond[0, :, :, :]\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass cGAN:\n\n def __init__(self, input_dim1, input_dim2, input_dim3, latent_size):\n self.input_dim1 = input_dim1\n self.input_dim2 = input_dim2\n self.input_dim3 = input_dim3\n self.latent_size = latent_size\n\n def discriminator(self):\n input_shape = self.input_dim1, self.input_dim2, self.input_dim3\n input_cond = Input(shape=input_shape)\n input_x = Input(shape=input_shape)\n merge = Concatenate()([input_x, input_cond])\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Flatten()(out)\n out = Dropout(0.5)(out)\n y = Dense(1, activation='sigmoid')(out)\n model = Model([input_x, input_cond], y)\n opt = Adam(lr=0.0002)\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[\n 'accuracy'])\n return model\n\n def generator(self):\n image_dim = self.input_dim1\n latent_shape = self.latent_size\n cond_shape = image_dim, image_dim, self.input_dim3\n input_latent = Input(shape=(latent_shape,))\n num_nodes = image_dim * image_dim\n latent = Dense(num_nodes)(input_latent)\n latent = LeakyReLU(alpha=0.2)(latent)\n latent = Reshape((image_dim, image_dim, 1))(latent)\n input_cond = Input(shape=cond_shape)\n cond = input_cond\n merge = Concatenate()([latent, cond])\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n x = Conv2D(1, (4, 4), strides=(1, 1), activation='tanh', padding='same'\n )(out)\n model = Model([input_latent, input_cond], x)\n return model\n\n def combined(self, g_model, d_model):\n d_model.trainable = False\n input_latent, input_cond = g_model.input\n x = g_model.output\n y = d_model([x, input_cond])\n model = 
Model([input_latent, input_cond], y)\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt)\n return model\n\n def generate_real_samples(self, focused, defocused, n_samples):\n idx = randint(0, focused.shape[0], n_samples)\n x_real, input_cond = focused[idx, :, :, :], defocused[idx, :, :, :]\n y_real = ones((n_samples, 1))\n return [x_real, input_cond], y_real\n\n def generate_latent(self, latent_size, n_samples):\n total_latent = randn(latent_size * n_samples)\n input_z = total_latent.reshape(n_samples, latent_size)\n return input_z\n\n def generate_fake_samples(self, generator, defocused, latent_dim, n_samples\n ):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx, :, :, :]\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n y_fake = zeros((n_samples, 1))\n return [x_fake, input_cond], y_fake\n <mask token>\n\n def train(self, g_model, d_model, gan_model, real, input_cond,\n latent_dim, n_epochs, n_batch, save):\n bat_per_epo = int(real.shape[0] / n_batch)\n half_batch = int(n_batch / 2)\n g_loss = np.zeros(n_epochs)\n d_loss_real = np.zeros(n_epochs)\n d_loss_fake = np.zeros(n_epochs)\n for i in range(n_epochs):\n start = timeit.default_timer()\n print('================== Epoch %d ==================\\n' % (i + 1))\n for j in range(bat_per_epo):\n [x_real, input_cond_real], y_real = self.generate_real_samples(\n real, input_cond, half_batch)\n d_loss_real[i], _ = d_model.train_on_batch([x_real,\n input_cond_real], y_real)\n [x_fake, input_cond_fake], y_fake = self.generate_fake_samples(\n g_model, input_cond, latent_dim, half_batch)\n d_loss_fake[i], _ = d_model.train_on_batch([x_fake,\n input_cond_fake], y_fake)\n [z_input, input_cond_gan], y_gan = self.generate_gan_input(\n input_cond, latent_dim, n_batch)\n g_loss[i] = gan_model.train_on_batch([z_input,\n input_cond_gan], y_gan)\n print('Completed: %.f' % np.divide((j + 1) * 100,\n 
bat_per_epo) + '%')\n print(\n 'Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f'\n % (i + 1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\\n')\n stop = timeit.default_timer()\n print('Time: %.2f min' % ((stop - start) / 60))\n g_model.save('./models/cgan_' + save + '.h5')\n loss = np.array([d_loss_real, d_loss_fake, g_loss])\n np.save('./models/cgan_loss_' + save, loss)\n\n def generate_fakes_givenOne(self, generator, focused, defocused,\n latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], 1)\n x_real = focused[idx, :, :, :]\n input_cond = defocused[idx, :, :, :]\n input_cond = np.repeat(input_cond, n_samples, axis=0)\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n return x_real, x_fake, input_cond[0, :, :, :]\n\n def generate_fakes_givenMany(self, generator, focused, defocused,\n latent_dim, n_examples):\n n_samples = n_examples - 2\n x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape\n [2], focused.shape[3]))\n input_cond_many = np.zeros((n_examples, focused.shape[1], focused.\n shape[2], focused.shape[3]))\n x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1],\n focused.shape[2], focused.shape[3]))\n for i in range(n_examples):\n x_real_many[i, :, :, :], x_fake_many[i, :, :, :, :\n ], input_cond_many[i, :, :, :] = self.generate_fakes_givenOne(\n generator, focused, defocused, latent_dim, n_samples)\n return x_real_many, x_fake_many, input_cond_many\n",
"step-3": "<mask token>\n\n\nclass cGAN:\n\n def __init__(self, input_dim1, input_dim2, input_dim3, latent_size):\n self.input_dim1 = input_dim1\n self.input_dim2 = input_dim2\n self.input_dim3 = input_dim3\n self.latent_size = latent_size\n\n def discriminator(self):\n input_shape = self.input_dim1, self.input_dim2, self.input_dim3\n input_cond = Input(shape=input_shape)\n input_x = Input(shape=input_shape)\n merge = Concatenate()([input_x, input_cond])\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Flatten()(out)\n out = Dropout(0.5)(out)\n y = Dense(1, activation='sigmoid')(out)\n model = Model([input_x, input_cond], y)\n opt = Adam(lr=0.0002)\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[\n 'accuracy'])\n return model\n\n def generator(self):\n image_dim = self.input_dim1\n latent_shape = self.latent_size\n cond_shape = image_dim, image_dim, self.input_dim3\n input_latent = Input(shape=(latent_shape,))\n num_nodes = image_dim * image_dim\n latent = Dense(num_nodes)(input_latent)\n latent = LeakyReLU(alpha=0.2)(latent)\n latent = Reshape((image_dim, image_dim, 1))(latent)\n input_cond = Input(shape=cond_shape)\n cond = input_cond\n merge = Concatenate()([latent, cond])\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n x = Conv2D(1, (4, 4), strides=(1, 1), activation='tanh', padding='same'\n )(out)\n model = Model([input_latent, input_cond], x)\n return model\n\n def combined(self, g_model, d_model):\n d_model.trainable = False\n input_latent, input_cond = g_model.input\n x = g_model.output\n y = d_model([x, input_cond])\n model = 
Model([input_latent, input_cond], y)\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt)\n return model\n\n def generate_real_samples(self, focused, defocused, n_samples):\n idx = randint(0, focused.shape[0], n_samples)\n x_real, input_cond = focused[idx, :, :, :], defocused[idx, :, :, :]\n y_real = ones((n_samples, 1))\n return [x_real, input_cond], y_real\n\n def generate_latent(self, latent_size, n_samples):\n total_latent = randn(latent_size * n_samples)\n input_z = total_latent.reshape(n_samples, latent_size)\n return input_z\n\n def generate_fake_samples(self, generator, defocused, latent_dim, n_samples\n ):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx, :, :, :]\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n y_fake = zeros((n_samples, 1))\n return [x_fake, input_cond], y_fake\n\n def generate_gan_input(self, defocused, latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx, :, :, :]\n input_z = self.generate_latent(latent_dim, n_samples)\n y_gan = ones((n_samples, 1))\n return [input_z, input_cond], y_gan\n\n def train(self, g_model, d_model, gan_model, real, input_cond,\n latent_dim, n_epochs, n_batch, save):\n bat_per_epo = int(real.shape[0] / n_batch)\n half_batch = int(n_batch / 2)\n g_loss = np.zeros(n_epochs)\n d_loss_real = np.zeros(n_epochs)\n d_loss_fake = np.zeros(n_epochs)\n for i in range(n_epochs):\n start = timeit.default_timer()\n print('================== Epoch %d ==================\\n' % (i + 1))\n for j in range(bat_per_epo):\n [x_real, input_cond_real], y_real = self.generate_real_samples(\n real, input_cond, half_batch)\n d_loss_real[i], _ = d_model.train_on_batch([x_real,\n input_cond_real], y_real)\n [x_fake, input_cond_fake], y_fake = self.generate_fake_samples(\n g_model, input_cond, latent_dim, half_batch)\n d_loss_fake[i], _ = 
d_model.train_on_batch([x_fake,\n input_cond_fake], y_fake)\n [z_input, input_cond_gan], y_gan = self.generate_gan_input(\n input_cond, latent_dim, n_batch)\n g_loss[i] = gan_model.train_on_batch([z_input,\n input_cond_gan], y_gan)\n print('Completed: %.f' % np.divide((j + 1) * 100,\n bat_per_epo) + '%')\n print(\n 'Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f'\n % (i + 1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\\n')\n stop = timeit.default_timer()\n print('Time: %.2f min' % ((stop - start) / 60))\n g_model.save('./models/cgan_' + save + '.h5')\n loss = np.array([d_loss_real, d_loss_fake, g_loss])\n np.save('./models/cgan_loss_' + save, loss)\n\n def generate_fakes_givenOne(self, generator, focused, defocused,\n latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], 1)\n x_real = focused[idx, :, :, :]\n input_cond = defocused[idx, :, :, :]\n input_cond = np.repeat(input_cond, n_samples, axis=0)\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n return x_real, x_fake, input_cond[0, :, :, :]\n\n def generate_fakes_givenMany(self, generator, focused, defocused,\n latent_dim, n_examples):\n n_samples = n_examples - 2\n x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape\n [2], focused.shape[3]))\n input_cond_many = np.zeros((n_examples, focused.shape[1], focused.\n shape[2], focused.shape[3]))\n x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1],\n focused.shape[2], focused.shape[3]))\n for i in range(n_examples):\n x_real_many[i, :, :, :], x_fake_many[i, :, :, :, :\n ], input_cond_many[i, :, :, :] = self.generate_fakes_givenOne(\n generator, focused, defocused, latent_dim, n_samples)\n return x_real_many, x_fake_many, input_cond_many\n",
"step-4": "<mask token>\nimport numpy as np\nimport timeit\nimport matplotlib.pyplot as plt\nfrom numpy import expand_dims, zeros, ones\nfrom numpy.random import randn, randint\nfrom keras.models import load_model\nfrom keras.optimizers import Adam\nfrom keras.models import Model\nfrom keras.layers import Input, Reshape, Flatten, Concatenate\nfrom keras.layers import Dense, Conv2D, Conv2DTranspose\nfrom keras.layers import Dropout, LeakyReLU\n\n\nclass cGAN:\n\n def __init__(self, input_dim1, input_dim2, input_dim3, latent_size):\n self.input_dim1 = input_dim1\n self.input_dim2 = input_dim2\n self.input_dim3 = input_dim3\n self.latent_size = latent_size\n\n def discriminator(self):\n input_shape = self.input_dim1, self.input_dim2, self.input_dim3\n input_cond = Input(shape=input_shape)\n input_x = Input(shape=input_shape)\n merge = Concatenate()([input_x, input_cond])\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Flatten()(out)\n out = Dropout(0.5)(out)\n y = Dense(1, activation='sigmoid')(out)\n model = Model([input_x, input_cond], y)\n opt = Adam(lr=0.0002)\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[\n 'accuracy'])\n return model\n\n def generator(self):\n image_dim = self.input_dim1\n latent_shape = self.latent_size\n cond_shape = image_dim, image_dim, self.input_dim3\n input_latent = Input(shape=(latent_shape,))\n num_nodes = image_dim * image_dim\n latent = Dense(num_nodes)(input_latent)\n latent = LeakyReLU(alpha=0.2)(latent)\n latent = Reshape((image_dim, image_dim, 1))(latent)\n input_cond = Input(shape=cond_shape)\n cond = input_cond\n merge = Concatenate()([latent, cond])\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = 
Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n x = Conv2D(1, (4, 4), strides=(1, 1), activation='tanh', padding='same'\n )(out)\n model = Model([input_latent, input_cond], x)\n return model\n\n def combined(self, g_model, d_model):\n d_model.trainable = False\n input_latent, input_cond = g_model.input\n x = g_model.output\n y = d_model([x, input_cond])\n model = Model([input_latent, input_cond], y)\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt)\n return model\n\n def generate_real_samples(self, focused, defocused, n_samples):\n idx = randint(0, focused.shape[0], n_samples)\n x_real, input_cond = focused[idx, :, :, :], defocused[idx, :, :, :]\n y_real = ones((n_samples, 1))\n return [x_real, input_cond], y_real\n\n def generate_latent(self, latent_size, n_samples):\n total_latent = randn(latent_size * n_samples)\n input_z = total_latent.reshape(n_samples, latent_size)\n return input_z\n\n def generate_fake_samples(self, generator, defocused, latent_dim, n_samples\n ):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx, :, :, :]\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n y_fake = zeros((n_samples, 1))\n return [x_fake, input_cond], y_fake\n\n def generate_gan_input(self, defocused, latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx, :, :, :]\n input_z = self.generate_latent(latent_dim, n_samples)\n y_gan = ones((n_samples, 1))\n return [input_z, input_cond], y_gan\n\n def train(self, g_model, d_model, gan_model, real, input_cond,\n latent_dim, n_epochs, n_batch, save):\n bat_per_epo = int(real.shape[0] / n_batch)\n half_batch = int(n_batch / 2)\n g_loss = np.zeros(n_epochs)\n d_loss_real = np.zeros(n_epochs)\n d_loss_fake = np.zeros(n_epochs)\n for i in range(n_epochs):\n start = timeit.default_timer()\n print('================== 
Epoch %d ==================\\n' % (i + 1))\n for j in range(bat_per_epo):\n [x_real, input_cond_real], y_real = self.generate_real_samples(\n real, input_cond, half_batch)\n d_loss_real[i], _ = d_model.train_on_batch([x_real,\n input_cond_real], y_real)\n [x_fake, input_cond_fake], y_fake = self.generate_fake_samples(\n g_model, input_cond, latent_dim, half_batch)\n d_loss_fake[i], _ = d_model.train_on_batch([x_fake,\n input_cond_fake], y_fake)\n [z_input, input_cond_gan], y_gan = self.generate_gan_input(\n input_cond, latent_dim, n_batch)\n g_loss[i] = gan_model.train_on_batch([z_input,\n input_cond_gan], y_gan)\n print('Completed: %.f' % np.divide((j + 1) * 100,\n bat_per_epo) + '%')\n print(\n 'Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f'\n % (i + 1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\\n')\n stop = timeit.default_timer()\n print('Time: %.2f min' % ((stop - start) / 60))\n g_model.save('./models/cgan_' + save + '.h5')\n loss = np.array([d_loss_real, d_loss_fake, g_loss])\n np.save('./models/cgan_loss_' + save, loss)\n\n def generate_fakes_givenOne(self, generator, focused, defocused,\n latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], 1)\n x_real = focused[idx, :, :, :]\n input_cond = defocused[idx, :, :, :]\n input_cond = np.repeat(input_cond, n_samples, axis=0)\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n return x_real, x_fake, input_cond[0, :, :, :]\n\n def generate_fakes_givenMany(self, generator, focused, defocused,\n latent_dim, n_examples):\n n_samples = n_examples - 2\n x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape\n [2], focused.shape[3]))\n input_cond_many = np.zeros((n_examples, focused.shape[1], focused.\n shape[2], focused.shape[3]))\n x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1],\n focused.shape[2], focused.shape[3]))\n for i in range(n_examples):\n x_real_many[i, :, :, :], x_fake_many[i, :, :, :, :\n 
], input_cond_many[i, :, :, :] = self.generate_fakes_givenOne(\n generator, focused, defocused, latent_dim, n_samples)\n return x_real_many, x_fake_many, input_cond_many\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 11 18:50:46 2019\n\n@author: kanfar\n\"\"\"\n\nimport numpy as np\nimport timeit\nimport matplotlib.pyplot as plt\nfrom numpy import expand_dims, zeros, ones\nfrom numpy.random import randn, randint\nfrom keras.models import load_model\nfrom keras.optimizers import Adam\nfrom keras.models import Model\nfrom keras.layers import Input, Reshape, Flatten, Concatenate\nfrom keras.layers import Dense, Conv2D, Conv2DTranspose\nfrom keras.layers import Dropout, LeakyReLU\n\nclass cGAN:\n def __init__(self, input_dim1, input_dim2, input_dim3, latent_size):\n self.input_dim1 = input_dim1\n self.input_dim2 = input_dim2\n self.input_dim3 = input_dim3\n self.latent_size = latent_size\n def discriminator(self):\n #conditional input\n input_shape = (self.input_dim1, self.input_dim2, self.input_dim3)\n input_cond = Input(shape = input_shape)\n #generator output\n input_x = Input(shape = input_shape)\n merge = Concatenate()([input_x, input_cond])\n #downsample\n out = Conv2D(32, (3,3), strides=(2,2), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (3,3), strides=(2,2), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Flatten()(out)\n out = Dropout(0.5)(out)\n y = Dense(1, activation='sigmoid')(out)\n # define model\n model = Model([input_x, input_cond], y)\n # compile model\n opt = Adam(lr=0.0002) #0.0002 and beta_1 0.5\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\n return model\n def generator(self):\n #losing one pixel, figure out later\n image_dim = self.input_dim1\n latent_shape = self.latent_size\n cond_shape = (image_dim, image_dim, self.input_dim3)\n \n input_latent = Input(shape = (latent_shape,))\n num_nodes = image_dim * image_dim\n latent = Dense(num_nodes)(input_latent)\n latent = LeakyReLU(alpha=0.2)(latent)\n latent = Reshape((image_dim,image_dim,1))(latent)\n \n input_cond = Input(shape = cond_shape)\n cond = input_cond\n \n 
merge = Concatenate()([latent,cond])\n \n # upsample to 14x14\n out = Conv2D(32, (4,4), strides=(1,1), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n # upsample to 28x28\n out = Conv2D(32, (4,4), strides=(1,1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n \n out = Conv2D(32, (4,4), strides=(1,1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n # output\n x = Conv2D(1, (4,4), strides=(1,1), activation='tanh', padding='same')(out) #something key that I don't understand\n # define model\n model = Model([input_latent, input_cond], x)\n return model\n def combined(self, g_model, d_model):\n #model comprised of two models\n # make weights in the discriminator not trainable\n d_model.trainable = False\n # get noise and label inputs from generator model\n input_latent, input_cond = g_model.input #defining the tensors in a short way: this is saying the input to this model is the same size as input to g_model\n # get image output from the generator model\n x = g_model.output\n #can I do x = g_model([input_latent, input_cond]) instead of the above?\n # connect image output and label input from generator as inputs to discriminator\n y = d_model([x, input_cond]) #why this needs to be connected but not the above???? does the first output take model input as default??????? 
test this\n # define gan model as taking noise and label and outputting a classification\n model = Model([input_latent, input_cond], y)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt)\n return model\n def generate_real_samples(self, focused, defocused, n_samples):\n idx = randint(0, focused.shape[0], n_samples)\n x_real, input_cond = focused[idx,:,:,:], defocused[idx,:,:,:] \n y_real = ones((n_samples,1))\n return [x_real, input_cond], y_real\n \n def generate_latent(self, latent_size, n_samples):\n #generate points in teh latent space\n total_latent = randn(latent_size*n_samples)\n input_z = total_latent.reshape(n_samples, latent_size) \n return input_z\n def generate_fake_samples(self, generator, defocused, latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx,:,:,:]\n input_z = self.generate_latent(latent_dim, n_samples)\n # predict outputs\n x_fake = generator.predict([input_z, input_cond])\n # create class labels\n y_fake = zeros((n_samples, 1))\n return [x_fake, input_cond], y_fake\n def generate_gan_input(self, defocused, latent_dim, n_samples):\n #defocused = data[1,:,:,:]\n #defocused = np.expand_dims(input_cond, axis = -1)\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx,:,:,:]\n input_z = self.generate_latent(latent_dim, n_samples)\n # create class labels\n y_gan = ones((n_samples, 1))\n return [input_z, input_cond], y_gan\n\n def train(self, g_model, d_model, gan_model, real, input_cond, latent_dim, n_epochs, n_batch, save):\n bat_per_epo = int(real.shape[0] / n_batch) #check\n half_batch = int(n_batch / 2)\n g_loss = np.zeros(n_epochs)\n d_loss_real = np.zeros(n_epochs)\n d_loss_fake = np.zeros(n_epochs)\n # manually enumerate epochs\n for i in range(n_epochs):\n start = timeit.default_timer()\n # enumerate batches over the training set\n print('================== Epoch %d ==================\\n' % (i+1))\n for 
j in range(bat_per_epo):\n # get randomly selected 'real' samples\n [x_real, input_cond_real], y_real = self.generate_real_samples(real, input_cond, half_batch)\n # update discriminator model weights\n d_loss_real[i], _ = d_model.train_on_batch([x_real, input_cond_real], y_real)\n # generate 'fake' examples\n [x_fake, input_cond_fake], y_fake = self.generate_fake_samples(g_model, input_cond, latent_dim, half_batch)\n # update discriminator model weights\n d_loss_fake[i], _ = d_model.train_on_batch([x_fake, input_cond_fake], y_fake)\n # prepare points in latent space as input for the generator\n [z_input, input_cond_gan], y_gan = self.generate_gan_input(input_cond, latent_dim, n_batch)\n # update the generator via the discriminator's error\n g_loss[i] = gan_model.train_on_batch([z_input, input_cond_gan], y_gan)\n # summarize loss on this batch\n print('Completed: %.f' % np.divide((j+1)*100,bat_per_epo) +'%')\n print('Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f' %\n (i+1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\\n')\n stop = timeit.default_timer()\n print('Time: %.2f min' % ((stop - start)/60)) \n # save the generator model\n g_model.save('./models/cgan_'+ save + '.h5') #save somewhere\n # save loss history\n loss = np.array([d_loss_real, d_loss_fake, g_loss])\n np.save('./models/cgan_loss_' + save, loss)\n def generate_fakes_givenOne(self, generator, focused, defocused, latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], 1)\n x_real = focused[idx,:,:,:]\n input_cond = defocused[idx,:,:,:] ##### should last be zero or :?\n input_cond = np.repeat(input_cond, n_samples, axis=0)\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n return x_real, x_fake, input_cond[0,:,:,:]\n def generate_fakes_givenMany(self, generator, focused, defocused, latent_dim, n_examples):\n n_samples = n_examples-2\n x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape[2], 
focused.shape[3]))\n input_cond_many = np.zeros((n_examples, focused.shape[1], focused.shape[2], focused.shape[3]))\n x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1], focused.shape[2], focused.shape[3]))\n \n for i in range(n_examples):\n x_real_many[i,:,:,:], x_fake_many[i,:,:,:,:], input_cond_many[i,:,:,:] = self.generate_fakes_givenOne(generator, focused, defocused, latent_dim, n_samples)\n return x_real_many, x_fake_many, input_cond_many\n \n ",
"step-ids": [
10,
11,
12,
13,
14
]
}
|
[
10,
11,
12,
13,
14
] |
import logging
from datetime import datetime
import boto3
from pytz import timezone
from mliyweb.api.v1.api_session_limiter import session_is_okay
from mliyweb.api.v1.json_view import JsonView
from mliyweb.dns import deleteDnsEntry
from mliyweb.models import Cluster
from mliyweb.resources.clusters import ClusterService
from mliyweb.settings import AWS_REGION
from mliyweb.utils import log_enter_exit
class UserGroupClusters(JsonView):
	"""
	JSON endpoint listing the clusters visible to the requesting user's groups.

	A session-based rate limiter decides whether this request triggers a
	refresh of the database from AWS or simply serves the already-stored
	rows. On any failure the error is logged and an empty list is returned
	instead of an error response.
	"""
	# NOTE(review): logger channel mirrors UserClusters — possibly a
	# copy-paste; confirm the intended channel before renaming it.
	logger = logging.getLogger('mliyweb.views.UserClusters')
	cluster_service = ClusterService()

	@log_enter_exit(logger)
	def get_data(self, context):
		user = self.request.user
		try:
			# The limiter returns True when enough time has passed for a refresh.
			refresh = session_is_okay(self.request.session, "group_clusters")
			if refresh:
				self.logger.info("Updating clusters in database")
				return self.cluster_service.update_by_user_group(user)
			self.logger.info("Getting clusters from database")
			return self.cluster_service.get_by_user_group(user)
		except Exception as e:
			self.logger.exception(e)
			return []
class UserClusters(JsonView):
	"""
	Json view of the clusters owned by the requesting user.

	Refreshes the user's cluster rows when the per-session rate limiter
	allows it; otherwise serves the cached database rows. Unlike the
	group view, errors here are logged and re-raised to the caller.
	"""
	# TODO There needs to be a Cluster Launch thread cleanup/rework
	logger = logging.getLogger('mliyweb.views.UserClusters')
	cluster_service = ClusterService()

	@log_enter_exit(logger)
	def get_data(self, context):
		owner = self.request.user.username
		try:
			# Branches inverted relative to the limiter check: serve the
			# cache unless a refresh window has opened.
			if not session_is_okay(self.request.session, "user_clusters"):
				self.logger.info("Getting clusters from database")
				return self.cluster_service.get_by_user(owner)
			self.logger.info("Updating clusters in database")
			return self.cluster_service.update_by_user(owner)
		except Exception as exc:
			self.logger.exception(exc)
			raise
class SingleCluster(JsonView):
	"""
	Json view of a single cluster, looked up via the 'pk' url kwarg.

	NOTE(review): the rate-limiter key is "user_clusters", the same key
	used by the user-wide cluster list view - confirm sharing one window
	between the two endpoints is intentional.
	"""
	logger = logging.getLogger('mliyweb.views.SingleCluster')
	cluster_service = ClusterService()

	@log_enter_exit(logger)
	def get_data(self, context):
		target_id = self.kwargs['pk']
		try:
			should_refresh = session_is_okay(self.request.session, "user_clusters")
			if should_refresh:
				self.logger.info("Updating clusters in database")
				return self.cluster_service.update_single_cluster(target_id)
			self.logger.info("Getting clusters from database")
			return self.cluster_service.get_single_cluster(target_id)
		except Exception as exc:
			self.logger.exception(exc)
			raise
class ChangeClusterState(JsonView):
	"""
	Terminates the cluster identified by the 'clusterid' url kwarg.

	Deletes the cluster's CloudFormation stack, closes out any ongoing
	billing record, removes the cluster's DNS entry, and marks the
	database row TERMINATED.

	Returns:
		dict: ``{'action': 'terminate', 'status': 'ok'}`` on completion.

	Raises:
		Cluster.DoesNotExist: if no cluster matches the given id.
	"""
	log = logging.getLogger('mliyweb.views.ChangeClusterState')
	cluster_service = ClusterService()

	@log_enter_exit(log, log_level=10)
	def get_data(self, context):
		client = boto3.client('cloudformation', region_name=AWS_REGION)
		cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])

		# Ask AWS to tear the stack down (delete_stack is a no-op for
		# stacks that are already deleted).
		client.delete_stack(StackName=cluster.stack_id)

		# Close out the running bill, if any, at the moment of termination.
		if cluster.current_bill:
			cluster.current_bill.ongoing = False
			cluster.current_bill.end_time = datetime.now(timezone('UTC'))
			cluster.current_bill.save()

		# The original state check branched on TERMINATED/FAILED but both
		# branches performed the identical DNS cleanup, so the conditional
		# was dead weight - always remove the entry.
		deleteDnsEntry(cluster.cluster_id, cluster.master_ip)

		cluster.state = "TERMINATED"
		cluster.save()

		return {'action': 'terminate', 'status': 'ok'}
|
normal
|
{
"blob_id": "f882b73645c6a280a17f40b27c01ecad7e4d85ae",
"index": 5860,
"step-1": "<mask token>\n\n\nclass UserClusters(JsonView):\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n username = self.request.user.username\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user(username)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user(username)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass SingleCluster(JsonView):\n logger = logging.getLogger('mliyweb.views.SingleCluster')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n cluster_id = self.kwargs['pk']\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_single_cluster(cluster_id)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_single_cluster(cluster_id)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass ChangeClusterState(JsonView):\n log = logging.getLogger('mliyweb.views.ChangeClusterState')\n cluster_service = ClusterService()\n\n @log_enter_exit(log, log_level=10)\n def get_data(self, context):\n client = boto3.client('cloudformation', region_name=AWS_REGION)\n cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])\n client.delete_stack(StackName=cluster.stack_id)\n if cluster.current_bill:\n cluster.current_bill.ongoing = False\n cluster.current_bill.end_time = datetime.now(timezone('UTC'))\n cluster.current_bill.save()\n if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n else:\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n cluster.state = 'TERMINATED'\n cluster.save()\n return 
{'action': 'terminate', 'status': 'ok'}\n",
"step-2": "<mask token>\n\n\nclass UserGroupClusters(JsonView):\n <mask token>\n <mask token>\n <mask token>\n\n @log_enter_exit(logger)\n def get_data(self, context):\n user = self.request.user\n try:\n if session_is_okay(self.request.session, 'group_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user_group(user)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user_group(user)\n except Exception as e:\n self.logger.exception(e)\n return []\n\n\nclass UserClusters(JsonView):\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n username = self.request.user.username\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user(username)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user(username)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass SingleCluster(JsonView):\n logger = logging.getLogger('mliyweb.views.SingleCluster')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n cluster_id = self.kwargs['pk']\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_single_cluster(cluster_id)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_single_cluster(cluster_id)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass ChangeClusterState(JsonView):\n log = logging.getLogger('mliyweb.views.ChangeClusterState')\n cluster_service = ClusterService()\n\n @log_enter_exit(log, log_level=10)\n def get_data(self, context):\n client = boto3.client('cloudformation', region_name=AWS_REGION)\n 
cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])\n client.delete_stack(StackName=cluster.stack_id)\n if cluster.current_bill:\n cluster.current_bill.ongoing = False\n cluster.current_bill.end_time = datetime.now(timezone('UTC'))\n cluster.current_bill.save()\n if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n else:\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n cluster.state = 'TERMINATED'\n cluster.save()\n return {'action': 'terminate', 'status': 'ok'}\n",
"step-3": "<mask token>\n\n\nclass UserGroupClusters(JsonView):\n <mask token>\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n user = self.request.user\n try:\n if session_is_okay(self.request.session, 'group_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user_group(user)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user_group(user)\n except Exception as e:\n self.logger.exception(e)\n return []\n\n\nclass UserClusters(JsonView):\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n username = self.request.user.username\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user(username)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user(username)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass SingleCluster(JsonView):\n logger = logging.getLogger('mliyweb.views.SingleCluster')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n cluster_id = self.kwargs['pk']\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_single_cluster(cluster_id)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_single_cluster(cluster_id)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass ChangeClusterState(JsonView):\n log = logging.getLogger('mliyweb.views.ChangeClusterState')\n cluster_service = ClusterService()\n\n @log_enter_exit(log, log_level=10)\n def get_data(self, context):\n 
client = boto3.client('cloudformation', region_name=AWS_REGION)\n cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])\n client.delete_stack(StackName=cluster.stack_id)\n if cluster.current_bill:\n cluster.current_bill.ongoing = False\n cluster.current_bill.end_time = datetime.now(timezone('UTC'))\n cluster.current_bill.save()\n if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n else:\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n cluster.state = 'TERMINATED'\n cluster.save()\n return {'action': 'terminate', 'status': 'ok'}\n",
"step-4": "<mask token>\n\n\nclass UserGroupClusters(JsonView):\n \"\"\"\n\tReturns a json struct with the current clusters. If the last updated\n\ttime in the db is greater than the timeout, it returns the current data\n\tand launches a background thread to refresh and prune the cluster list.\n\n\tIf called with ?forcerefresh as a url argument it'll refresh regardless\n\tof the last updated time.\n\t\"\"\"\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n user = self.request.user\n try:\n if session_is_okay(self.request.session, 'group_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user_group(user)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user_group(user)\n except Exception as e:\n self.logger.exception(e)\n return []\n\n\nclass UserClusters(JsonView):\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n username = self.request.user.username\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user(username)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user(username)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass SingleCluster(JsonView):\n logger = logging.getLogger('mliyweb.views.SingleCluster')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n cluster_id = self.kwargs['pk']\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_single_cluster(cluster_id)\n else:\n self.logger.info('Getting clusters from database')\n 
return self.cluster_service.get_single_cluster(cluster_id)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass ChangeClusterState(JsonView):\n log = logging.getLogger('mliyweb.views.ChangeClusterState')\n cluster_service = ClusterService()\n\n @log_enter_exit(log, log_level=10)\n def get_data(self, context):\n client = boto3.client('cloudformation', region_name=AWS_REGION)\n cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])\n client.delete_stack(StackName=cluster.stack_id)\n if cluster.current_bill:\n cluster.current_bill.ongoing = False\n cluster.current_bill.end_time = datetime.now(timezone('UTC'))\n cluster.current_bill.save()\n if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n else:\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n cluster.state = 'TERMINATED'\n cluster.save()\n return {'action': 'terminate', 'status': 'ok'}\n",
"step-5": "import logging\nfrom datetime import datetime\n\nimport boto3\nfrom pytz import timezone\n\nfrom mliyweb.api.v1.api_session_limiter import session_is_okay\nfrom mliyweb.api.v1.json_view import JsonView\nfrom mliyweb.dns import deleteDnsEntry\nfrom mliyweb.models import Cluster\nfrom mliyweb.resources.clusters import ClusterService\nfrom mliyweb.settings import AWS_REGION\nfrom mliyweb.utils import log_enter_exit\n\n\nclass UserGroupClusters(JsonView):\n\t'''\n\tReturns a json struct with the current clusters. If the last updated\n\ttime in the db is greater than the timeout, it returns the current data\n\tand launches a background thread to refresh and prune the cluster list.\n\n\tIf called with ?forcerefresh as a url argument it'll refresh regardless\n\tof the last updated time.\n\t'''\n\tlogger = logging.getLogger('mliyweb.views.UserClusters')\n\tcluster_service = ClusterService()\n\n\t# global instance refresh time stamp\n\n\t@log_enter_exit(logger)\n\tdef get_data(self, context):\n\n\t\tuser = self.request.user\n\t\ttry:\n\t\t\tif session_is_okay(self.request.session, \"group_clusters\"):\n\t\t\t\tself.logger.info(\"Updating clusters in database\")\n\t\t\t\treturn self.cluster_service.update_by_user_group(user)\n\t\t\telse:\n\t\t\t\tself.logger.info(\"Getting clusters from database\")\n\t\t\t\treturn self.cluster_service.get_by_user_group(user)\n\n\t\texcept Exception as e:\n\t\t\tself.logger.exception(e)\n\n\t\treturn []\n\n\nclass UserClusters(JsonView):\n\t# TODO There needs to be a Cluster Launch thread cleanup/rework\n\tlogger = logging.getLogger('mliyweb.views.UserClusters')\n\tcluster_service = ClusterService()\n\n\t@log_enter_exit(logger)\n\tdef get_data(self, context):\n\t\tusername = self.request.user.username\n\t\ttry:\n\t\t\tif session_is_okay(self.request.session, \"user_clusters\"):\n\t\t\t\tself.logger.info(\"Updating clusters in database\")\n\t\t\t\treturn 
self.cluster_service.update_by_user(username)\n\t\t\telse:\n\t\t\t\tself.logger.info(\"Getting clusters from database\")\n\t\t\t\treturn self.cluster_service.get_by_user(username)\n\n\t\texcept Exception as e:\n\t\t\tself.logger.exception(e)\n\t\t\traise\n\nclass SingleCluster(JsonView):\n\tlogger = logging.getLogger('mliyweb.views.SingleCluster')\n\tcluster_service = ClusterService()\n\n\t@log_enter_exit(logger)\n\tdef get_data(self, context):\n\t\tcluster_id = self.kwargs['pk']\n\n\t\ttry:\n\t\t\tif session_is_okay(self.request.session, \"user_clusters\"):\n\t\t\t\tself.logger.info(\"Updating clusters in database\")\n\t\t\t\treturn self.cluster_service.update_single_cluster(cluster_id)\n\t\t\telse:\n\t\t\t\tself.logger.info(\"Getting clusters from database\")\n\t\t\t\treturn self.cluster_service.get_single_cluster(cluster_id)\n\n\t\texcept Exception as e:\n\t\t\tself.logger.exception(e)\n\t\t\traise\n\n\nclass ChangeClusterState(JsonView):\n\tlog = logging.getLogger('mliyweb.views.ChangeClusterState')\n\tcluster_service = ClusterService()\n\n\t@log_enter_exit(log, log_level=10)\n\tdef get_data(self,context):\n\n\t\tclient = boto3.client('cloudformation', region_name=AWS_REGION)\n\t\tcluster = Cluster.objects.get(cluster_id = self.kwargs['clusterid'])\n\n\t\tclient.delete_stack(StackName=cluster.stack_id)\n\t\tif cluster.current_bill:\n\t\t\tcluster.current_bill.ongoing = False\n\t\t\tcluster.current_bill.end_time = datetime.now(timezone('UTC'))\n\t\t\tcluster.current_bill.save()\n\n\t\tif cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n\t\t\tdeleteDnsEntry(cluster.cluster_id,cluster.master_ip)\n\t\telse:\n\t\t\tdeleteDnsEntry(cluster.cluster_id,cluster.master_ip)\n\n\t\tcluster.state = \"TERMINATED\"\n\t\tcluster.save()\n\n\t\treturn { 'action' : 'terminate', 'status' : 'ok'}",
"step-ids": [
9,
11,
12,
13,
15
]
}
|
[
9,
11,
12,
13,
15
] |
<|reserved_special_token_0|>
def plotTensorflowConfmat(confmat, classes):
plt.imshow(confmat, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion Matrix')
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
for i, j in itertools.product(range(confmat.shape[0]), range(confmat.
shape[1])):
plt.text(j, i, format(confmat[i, j], '.2f'), horizontalalignment=
'center', color='black')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dataset.head()
dataset.describe()
<|reserved_special_token_0|>
dataset.drop(['Amount', 'Time'], axis=1, inplace=True)
dataset.head()
<|reserved_special_token_0|>
for train_index, test_index in SKfold.split(X, Y):
og_X_train, og_X_test = X.iloc[train_index], X.iloc[test_index]
og_Y_train, og_Y_test = Y.iloc[train_index], Y.iloc[test_index]
<|reserved_special_token_0|>
nd_dataset.head()
<|reserved_special_token_0|>
undersample_model.summary()
undersample_model.compile(Adam(lr=0.001), loss=
'sparse_categorical_crossentropy', metrics=['accuracy'])
<|reserved_special_token_0|>
undersample_model.fit(nd_Xtrain, nd_Ytrain, validation_split=0.2, epochs=20,
batch_size=25, shuffle=True, verbose=2, callbacks=[modelcheckpoint])
<|reserved_special_token_0|>
print(confmat)
def plotTensorflowConfmat(confmat, classes):
plt.imshow(confmat, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion Matrix')
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
for i, j in itertools.product(range(confmat.shape[0]), range(confmat.
shape[1])):
plt.text(j, i, format(confmat[i, j], '.2f'), horizontalalignment=
'center', color='black')
<|reserved_special_token_0|>
plotTensorflowConfmat(confmat, classes)
<|reserved_special_token_0|>
sm_X_train.shape
<|reserved_special_token_0|>
smote_model.summary()
smote_model.compile(Adam(lr=0.001), loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
<|reserved_special_token_0|>
smote_model.fit(sm_X_train, sm_Y_train, validation_split=0.2, batch_size=25,
epochs=20, verbose=2, shuffle=True, callbacks=[modelcheckpoint])
smote_model.save('models/smote_model.h5')
<|reserved_special_token_0|>
print(confmat)
plotTensorflowConfmat(confmat, classes)
<|reserved_special_token_0|>
sm2_X_train.head()
<|reserved_special_token_0|>
sm2_Y_train.head()
<|reserved_special_token_0|>
smote_df.head()
<|reserved_special_token_0|>
sns.heatmap(corr, cmap='coolwarm_r', annot_kws={'size': 20})
plt.show()
corr['Class'].sort_values()
<|reserved_special_token_0|>
f.suptitle('Negative Corr')
for i, feature in enumerate(negative_corr):
sns.boxplot(x='Class', y=feature, data=smote_df, ax=axes[i])
axes[i].set_title(feature)
<|reserved_special_token_0|>
f.suptitle('Positive Corr')
for i, feature in enumerate(positive_corr):
sns.boxplot(x='Class', y=feature, data=smote_df, ax=axes[i])
axes[i].set_title(feature)
for i, feature in enumerate(negative_corr):
fraud_dist = smote_df[feature].loc[smote_df['Class'] == 1].values
q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)
iqr = q75 - q25
cutoff = iqr * 1.5
upper_limit, lower_limit = q75 + cutoff, q25 - cutoff
outlier_list = [x for x in fraud_dist if x < lower_limit or x > upper_limit
]
smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (
smote_df[feature] < lower_limit)].index)
print(f'outliers removed {len(outlier_list)}')
for i, feature in enumerate(positive_corr):
fraud_dist = smote_df[feature].loc[smote_df['Class'] == 1].values
q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)
iqr = q75 - q25
cutoff = iqr * 1.5
upper_limit, lower_limit = q75 + cutoff, q25 - cutoff
outlier_list = [x for x in fraud_dist if x < lower_limit or x > upper_limit
]
smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (
smote_df[feature] < lower_limit)].index)
print(f'outliers removed {len(outlier_list)}')
smote_df.shape
<|reserved_special_token_0|>
smote_model.summary()
smote_model.compile(Adam(lr=0.001), loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
<|reserved_special_token_0|>
smote_model.fit(smote_X_train, smote_Y_train, validation_split=0.2, shuffle
=True, batch_size=25, epochs=20, callbacks=[modelcheckpoint])
smote_model.save('models/smote_outliers_removed.h5')
<|reserved_special_token_0|>
print(confmat)
<|reserved_special_token_0|>
plotTensorflowConfmat(confmat, classes)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dataset = pd.read_csv('./dataset/creditcard.csv')
dataset.head()
dataset.describe()
robustScaler = RobustScaler()
dataset['scaled_amount'] = robustScaler.fit_transform(dataset['Amount'].
values.reshape(-1, 1))
dataset['scaled_time'] = robustScaler.fit_transform(dataset['Time'].values.
reshape(-1, 1))
dataset.drop(['Amount', 'Time'], axis=1, inplace=True)
dataset.head()
X = dataset.drop(['Class'], axis=1)
Y = dataset['Class']
SKfold = StratifiedKFold(random_state=42)
for train_index, test_index in SKfold.split(X, Y):
og_X_train, og_X_test = X.iloc[train_index], X.iloc[test_index]
og_Y_train, og_Y_test = Y.iloc[train_index], Y.iloc[test_index]
og_X_train = og_X_train.values
og_X_test = og_X_test.values
og_Y_train = og_Y_train.values
og_Y_test = og_Y_test.values
dataset = dataset.sample(frac=1, random_state=42)
fraud = dataset.loc[dataset['Class'] == 1]
normal = dataset.loc[dataset['Class'] == 0][:492]
nd_dataset = pd.concat([fraud, normal])
nd_dataset = nd_dataset.sample(frac=1, random_state=42)
nd_dataset.head()
nd_X = nd_dataset.drop('Class', axis=1)
nd_Y = nd_dataset['Class']
nd_Xtrain, nd_Xtest, nd_Ytrain, nd_Ytest = train_test_split(nd_X, nd_Y,
random_state=42, test_size=0.2)
nd_Xtrain = nd_Xtrain.values
nd_Xtest = nd_Xtest.values
nd_Ytrain = nd_Ytrain.values
nd_Ytest = nd_Ytest.values
n_inputs = nd_Xtrain.shape[1]
undersample_model = Sequential([Dense(n_inputs, input_shape=(n_inputs,),
activation='relu'), Dense(32, activation='relu'), Dense(2, activation=
'softmax')])
undersample_model.summary()
undersample_model.compile(Adam(lr=0.001), loss=
'sparse_categorical_crossentropy', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint('models/undersample_model.h5',
save_best_only=True, monitor='val_acc')
undersample_model.fit(nd_Xtrain, nd_Ytrain, validation_split=0.2, epochs=20,
batch_size=25, shuffle=True, verbose=2, callbacks=[modelcheckpoint])
undersample_pred = undersample_model.predict(og_X_test, verbose=2)
undersample_pred_classes = undersample_model.predict_classes(og_X_test,
verbose=2)
confmat = confusion_matrix(og_Y_test, undersample_pred_classes)
print(confmat)
def plotTensorflowConfmat(confmat, classes):
plt.imshow(confmat, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion Matrix')
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
for i, j in itertools.product(range(confmat.shape[0]), range(confmat.
shape[1])):
plt.text(j, i, format(confmat[i, j], '.2f'), horizontalalignment=
'center', color='black')
classes = ['Normal', 'Fraud']
plotTensorflowConfmat(confmat, classes)
sm = SMOTE(sampling_strategy='minority', random_state=42)
sm_X_train, sm_Y_train = sm.fit_sample(og_X_train, og_Y_train)
sm_X_train.shape
n_inputs = sm_X_train.shape[1]
smote_model = Sequential([Dense(n_inputs, input_shape=(n_inputs,),
activation='relu'), Dense(32, activation='relu'), Dense(2, activation=
'softmax')])
smote_model.summary()
smote_model.compile(Adam(lr=0.001), loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint('models/smote_model.h5', save_best_only=
True, monitor='val_acc')
smote_model.fit(sm_X_train, sm_Y_train, validation_split=0.2, batch_size=25,
epochs=20, verbose=2, shuffle=True, callbacks=[modelcheckpoint])
smote_model.save('models/smote_model.h5')
smote_pred_classes = smote_model.predict_classes(og_X_test)
confmat = confusion_matrix(og_Y_test, smote_pred_classes)
print(confmat)
plotTensorflowConfmat(confmat, classes)
sm2 = SMOTE(sampling_strategy='minority', random_state=42)
sm2_X_train, sm2_Y_train = sm2.fit_sample(og_X_train, og_Y_train)
sm2_X_train = pd.DataFrame(sm2_X_train)
sm2_X_train.head()
sm2_Y_train = pd.DataFrame(sm2_Y_train, columns=['Class'])
sm2_Y_train.head()
smote_df = pd.concat([sm2_X_train, sm2_Y_train], axis=1)
smote_df.head()
smote_df = smote_df.sample(frac=1, random_state=42)
corr = smote_df.corr()
sns.heatmap(corr, cmap='coolwarm_r', annot_kws={'size': 20})
plt.show()
corr['Class'].sort_values()
negative_corr = [13, 11, 9, 15]
positive_corr = [3, 10]
f, axes = plt.subplots(ncols=4, figsize=(20, 4))
f.suptitle('Negative Corr')
for i, feature in enumerate(negative_corr):
sns.boxplot(x='Class', y=feature, data=smote_df, ax=axes[i])
axes[i].set_title(feature)
f, axes = plt.subplots(ncols=2, figsize=(20, 4))
f.suptitle('Positive Corr')
for i, feature in enumerate(positive_corr):
sns.boxplot(x='Class', y=feature, data=smote_df, ax=axes[i])
axes[i].set_title(feature)
for i, feature in enumerate(negative_corr):
fraud_dist = smote_df[feature].loc[smote_df['Class'] == 1].values
q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)
iqr = q75 - q25
cutoff = iqr * 1.5
upper_limit, lower_limit = q75 + cutoff, q25 - cutoff
outlier_list = [x for x in fraud_dist if x < lower_limit or x > upper_limit
]
smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (
smote_df[feature] < lower_limit)].index)
print(f'outliers removed {len(outlier_list)}')
for i, feature in enumerate(positive_corr):
fraud_dist = smote_df[feature].loc[smote_df['Class'] == 1].values
q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)
iqr = q75 - q25
cutoff = iqr * 1.5
upper_limit, lower_limit = q75 + cutoff, q25 - cutoff
outlier_list = [x for x in fraud_dist if x < lower_limit or x > upper_limit
]
smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (
smote_df[feature] < lower_limit)].index)
print(f'outliers removed {len(outlier_list)}')
smote_df.shape
smote_X_train = smote_df.drop(['Class'], axis=1)
smote_Y_train = smote_df['Class']
n_inputs = smote_X_train.shape[1]
smote_model = Sequential([Dense(n_inputs, input_shape=(n_inputs,),
activation='relu'), Dense(64, activation='relu'), Dense(32, activation=
'relu'), Dense(32, activation='relu'), Dense(2, activation='softmax')])
smote_model.summary()
smote_model.compile(Adam(lr=0.001), loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint('models/smote_outliers_removed.h5',
save_best_only=True, monitor='val_acc')
smote_model.fit(smote_X_train, smote_Y_train, validation_split=0.2, shuffle
=True, batch_size=25, epochs=20, callbacks=[modelcheckpoint])
smote_model.save('models/smote_outliers_removed.h5')
smote_pred_classes = smote_model.predict_classes(og_X_test)
confmat = confusion_matrix(og_Y_test, smote_pred_classes)
print(confmat)
classes = ['normal', 'fraud']
plotTensorflowConfmat(confmat, classes)
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, TruncatedSVD
import matplotlib.patches as mpatches
import time
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import collections
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from imblearn.pipeline import make_pipeline as imbalanced_make_pipeline
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss
from imblearn.metrics import classification_report_imbalanced
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score, classification_report, confusion_matrix, plot_confusion_matrix
from collections import Counter
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split, cross_val_score, GridSearchCV, cross_val_predict
from sklearn.preprocessing import RobustScaler
from scipy.stats import norm
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Activation, Dense
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.callbacks import ModelCheckpoint
import itertools
dataset = pd.read_csv('./dataset/creditcard.csv')
dataset.head()
dataset.describe()
robustScaler = RobustScaler()
dataset['scaled_amount'] = robustScaler.fit_transform(dataset['Amount'].
values.reshape(-1, 1))
dataset['scaled_time'] = robustScaler.fit_transform(dataset['Time'].values.
reshape(-1, 1))
dataset.drop(['Amount', 'Time'], axis=1, inplace=True)
dataset.head()
X = dataset.drop(['Class'], axis=1)
Y = dataset['Class']
SKfold = StratifiedKFold(random_state=42)
for train_index, test_index in SKfold.split(X, Y):
og_X_train, og_X_test = X.iloc[train_index], X.iloc[test_index]
og_Y_train, og_Y_test = Y.iloc[train_index], Y.iloc[test_index]
og_X_train = og_X_train.values
og_X_test = og_X_test.values
og_Y_train = og_Y_train.values
og_Y_test = og_Y_test.values
dataset = dataset.sample(frac=1, random_state=42)
fraud = dataset.loc[dataset['Class'] == 1]
normal = dataset.loc[dataset['Class'] == 0][:492]
nd_dataset = pd.concat([fraud, normal])
nd_dataset = nd_dataset.sample(frac=1, random_state=42)
nd_dataset.head()
nd_X = nd_dataset.drop('Class', axis=1)
nd_Y = nd_dataset['Class']
nd_Xtrain, nd_Xtest, nd_Ytrain, nd_Ytest = train_test_split(nd_X, nd_Y,
random_state=42, test_size=0.2)
nd_Xtrain = nd_Xtrain.values
nd_Xtest = nd_Xtest.values
nd_Ytrain = nd_Ytrain.values
nd_Ytest = nd_Ytest.values
n_inputs = nd_Xtrain.shape[1]
undersample_model = Sequential([Dense(n_inputs, input_shape=(n_inputs,),
activation='relu'), Dense(32, activation='relu'), Dense(2, activation=
'softmax')])
undersample_model.summary()
undersample_model.compile(Adam(lr=0.001), loss=
'sparse_categorical_crossentropy', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint('models/undersample_model.h5',
save_best_only=True, monitor='val_acc')
undersample_model.fit(nd_Xtrain, nd_Ytrain, validation_split=0.2, epochs=20,
batch_size=25, shuffle=True, verbose=2, callbacks=[modelcheckpoint])
undersample_pred = undersample_model.predict(og_X_test, verbose=2)
undersample_pred_classes = undersample_model.predict_classes(og_X_test,
verbose=2)
confmat = confusion_matrix(og_Y_test, undersample_pred_classes)
print(confmat)
def plotTensorflowConfmat(confmat, classes):
    """Render a confusion matrix as a Blues heatmap with per-cell counts.

    confmat: 2-D integer array, as returned by sklearn's confusion_matrix.
    classes: tick labels, indexed by class id.
    """
    plt.imshow(confmat, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title('Confusion Matrix')
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    # Counts are integers: format with 'd' instead of the original '.2f'
    # (which printed e.g. '56851.00'); flip the text to white on dark
    # cells so large counts stay readable against the Blues colormap.
    threshold = confmat.max() / 2.0
    for i, j in itertools.product(range(confmat.shape[0]), range(confmat.
            shape[1])):
        plt.text(j, i, format(confmat[i, j], 'd'), horizontalalignment=
            'center', color='white' if confmat[i, j] > threshold else 'black')
# Class-id -> label mapping reused by every confusion-matrix plot below.
classes = ['Normal', 'Fraud']
plotTensorflowConfmat(confmat, classes)
# Oversample the minority (fraud) class with SMOTE, fitted on the training
# fold only so no synthetic samples leak into the test set.
sm = SMOTE(sampling_strategy='minority', random_state=42)
# fit_resample() replaces fit_sample(), which was removed in
# imbalanced-learn 0.8 (fit_resample exists since 0.4).
sm_X_train, sm_Y_train = sm.fit_resample(og_X_train, og_Y_train)
sm_X_train.shape  # no-op in a script; shows the shape in a notebook cell
# Same architecture as the undersample model, trained on the
# SMOTE-balanced data and evaluated on the untouched original test fold.
n_inputs = sm_X_train.shape[1]
smote_model = Sequential([Dense(n_inputs, input_shape=(n_inputs,),
    activation='relu'), Dense(32, activation='relu'), Dense(2, activation=
    'softmax')])
smote_model.summary()
# learning_rate= replaces the deprecated lr= alias.
smote_model.compile(Adam(learning_rate=0.001), loss=
    'sparse_categorical_crossentropy', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint('models/smote_model.h5', save_best_only=
    True, monitor='val_acc')
smote_model.fit(sm_X_train, sm_Y_train, validation_split=0.2, batch_size=25,
    epochs=20, verbose=2, shuffle=True, callbacks=[modelcheckpoint])
smote_model.save('models/smote_model.h5')
# predict_classes() was removed in TF 2.6; argmax over the softmax output
# yields the same class labels.
smote_pred_classes = np.argmax(smote_model.predict(og_X_test), axis=1)
confmat = confusion_matrix(og_Y_test, smote_pred_classes)
print(confmat)
plotTensorflowConfmat(confmat, classes)
# Re-run SMOTE to rebuild the oversampled training set as labelled
# DataFrames for the correlation / outlier analysis below.
sm2 = SMOTE(sampling_strategy='minority', random_state=42)
# fit_resample() replaces fit_sample(), removed in imbalanced-learn 0.8.
sm2_X_train, sm2_Y_train = sm2.fit_resample(og_X_train, og_Y_train)
sm2_X_train = pd.DataFrame(sm2_X_train)
sm2_X_train.head()
sm2_Y_train = pd.DataFrame(sm2_Y_train, columns=['Class'])
sm2_Y_train.head()
smote_df = pd.concat([sm2_X_train, sm2_Y_train], axis=1)
smote_df.head()
smote_df = smote_df.sample(frac=1, random_state=42)
# Feature/Class correlations on the balanced data; the heatmap guides the
# negative_corr / positive_corr column choices below.
corr = smote_df.corr()
sns.heatmap(corr, cmap='coolwarm_r', annot_kws={'size': 20})
plt.show()
corr['Class'].sort_values()
# Columns most (anti-)correlated with Class, read off the heatmap above;
# columns are integer-labelled because the DataFrame was rebuilt from a
# plain array.
negative_corr = [13, 11, 9, 15]
positive_corr = [3, 10]
# Per-class box plots of the negatively correlated features.
f, axes = plt.subplots(ncols=4, figsize=(20, 4))
f.suptitle('Negative Corr')
for ax, col in zip(axes, negative_corr):
    sns.boxplot(x='Class', y=col, data=smote_df, ax=ax)
    ax.set_title(col)
# Same view for the positively correlated features.
f, axes = plt.subplots(ncols=2, figsize=(20, 4))
f.suptitle('Positive Corr')
for ax, col in zip(axes, positive_corr):
    sns.boxplot(x='Class', y=col, data=smote_df, ax=ax)
    ax.set_title(col)
def _drop_iqr_outliers(df, features):
    """Drop rows outside the 1.5*IQR fences of each feature, in order.

    Fences are computed from the FRAUD rows only (Class == 1), but rows of
    either class beyond them are removed, so the printed per-feature count
    (fraud-row outliers only) can understate the rows actually dropped --
    this mirrors the original copy-pasted loops exactly.  Returns the
    filtered DataFrame.
    """
    for feature in features:
        fraud_dist = df[feature].loc[df['Class'] == 1].values
        q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)
        cutoff = (q75 - q25) * 1.5
        upper_limit, lower_limit = q75 + cutoff, q25 - cutoff
        outlier_list = [x for x in fraud_dist if x < lower_limit or x >
            upper_limit]
        df = df.drop(df[(df[feature] > upper_limit) | (df[feature] <
            lower_limit)].index)
        print(f'outliers removed {len(outlier_list)}')
    return df


# The two identical copy-pasted loops collapsed into one helper call each.
smote_df = _drop_iqr_outliers(smote_df, negative_corr)
smote_df = _drop_iqr_outliers(smote_df, positive_corr)
smote_df.shape
smote_X_train = smote_df.drop(['Class'], axis=1)
smote_Y_train = smote_df['Class']
# Deeper network for the SMOTE data with IQR outliers removed.
n_inputs = smote_X_train.shape[1]
smote_model = Sequential([Dense(n_inputs, input_shape=(n_inputs,),
    activation='relu'), Dense(64, activation='relu'), Dense(32, activation=
    'relu'), Dense(32, activation='relu'), Dense(2, activation='softmax')])
smote_model.summary()
# learning_rate= replaces the deprecated lr= alias.
smote_model.compile(Adam(learning_rate=0.001), loss=
    'sparse_categorical_crossentropy', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint('models/smote_outliers_removed.h5',
    save_best_only=True, monitor='val_acc')
smote_model.fit(smote_X_train, smote_Y_train, validation_split=0.2, shuffle
    =True, batch_size=25, epochs=20, callbacks=[modelcheckpoint])
smote_model.save('models/smote_outliers_removed.h5')
# predict_classes() was removed in TF 2.6; argmax over the softmax output
# yields the same class labels.
smote_pred_classes = np.argmax(smote_model.predict(og_X_test), axis=1)
confmat = confusion_matrix(og_Y_test, smote_pred_classes)
print(confmat)
classes = ['normal', 'fraud']
plotTensorflowConfmat(confmat, classes)
<|reserved_special_token_1|>
# %%
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, TruncatedSVD
import matplotlib.patches as mpatches
import time
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import collections
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from imblearn.pipeline import make_pipeline as imbalanced_make_pipeline
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss
from imblearn.metrics import classification_report_imbalanced
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score, classification_report, confusion_matrix, plot_confusion_matrix
from collections import Counter
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split, cross_val_score, GridSearchCV, cross_val_predict
from sklearn.preprocessing import RobustScaler
from scipy.stats import norm
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Activation, Dense
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.callbacks import ModelCheckpoint
import itertools
# %%
# Load the credit-card transactions dataset (presumably the Kaggle fraud
# dataset -- 'Class' == 1 marks fraud, 0 normal; heavily imbalanced, see
# the undersampling cell below).
dataset = pd.read_csv('./dataset/creditcard.csv')
dataset.head()
# %%
dataset.describe()
# %%
# RobustScaler (median/IQR based) so extreme 'Amount'/'Time' values
# dominate less than under mean/std scaling.
robustScaler = RobustScaler()
dataset['scaled_amount'] = robustScaler.fit_transform(
    dataset['Amount'].values.reshape(-1, 1))
dataset['scaled_time'] = robustScaler.fit_transform(
    dataset['Time'].values.reshape(-1, 1))
# %%
# Drop the raw columns now that scaled replacements exist.
dataset.drop(['Amount', 'Time'], axis=1, inplace=True)
dataset.head()
# %%
# Feature matrix / label vector over the full (imbalanced) data.
X = dataset.drop(['Class'], axis=1)
Y = dataset['Class']
# %%
# Fix: shuffle=True is required for random_state to take effect; in
# scikit-learn >= 0.24 StratifiedKFold(random_state=42) with the default
# shuffle=False raises a ValueError.
SKfold = StratifiedKFold(shuffle=True, random_state=42)
for train_index, test_index in SKfold.split(X, Y):
    # Overwritten every iteration: only the last fold's split survives
    # (original notebook behavior, preserved).
    og_X_train, og_X_test = X.iloc[train_index], X.iloc[test_index]
    og_Y_train, og_Y_test = Y.iloc[train_index], Y.iloc[test_index]
# %%
# Plain numpy arrays for the Keras models below.
og_X_train = og_X_train.values
og_X_test = og_X_test.values
og_Y_train = og_Y_train.values
og_Y_test = og_Y_test.values
# %%
# Random undersampling: keep all fraud rows plus the first 492 shuffled
# normal rows (492 presumably matches the fraud count -- TODO confirm),
# giving a balanced 'nd' dataset.
dataset = dataset.sample(frac=1, random_state=42)
fraud = dataset.loc[dataset['Class'] == 1]
normal = dataset.loc[dataset['Class'] == 0][:492]
nd_dataset = pd.concat([fraud, normal])
nd_dataset = nd_dataset.sample(frac=1, random_state=42)
nd_dataset.head()
# %%
nd_X = nd_dataset.drop("Class", axis=1)
nd_Y = nd_dataset["Class"]
# %%
# 80/20 split of the balanced data, converted to numpy arrays for Keras.
nd_Xtrain, nd_Xtest, nd_Ytrain, nd_Ytest = train_test_split(
    nd_X, nd_Y, random_state=42, test_size=0.2)
nd_Xtrain = nd_Xtrain.values
nd_Xtest = nd_Xtest.values
nd_Ytrain = nd_Ytrain.values
nd_Ytest = nd_Ytest.values
# %%
# Small dense net trained on the balanced 50/50 data, scored on the
# ORIGINAL (imbalanced) test fold so metrics reflect real class ratios.
n_inputs = nd_Xtrain.shape[1]
undersample_model = Sequential([
    Dense(n_inputs, input_shape=(n_inputs,), activation="relu"),
    Dense(32, activation="relu"),
    Dense(2, activation="softmax")
])
# %%
undersample_model.summary()
# %%
# learning_rate= replaces the deprecated lr= alias (removed in newer Keras).
undersample_model.compile(
    Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=["accuracy"])
# NOTE(review): TF2 logs the metric as 'val_accuracy'; with monitor="val_acc"
# the checkpoint may never fire -- confirm the Keras version before renaming.
modelcheckpoint = ModelCheckpoint(
    "models/undersample_model.h5", save_best_only=True, monitor="val_acc")
undersample_model.fit(nd_Xtrain, nd_Ytrain, validation_split=0.2, epochs=20,
                      batch_size=25, shuffle=True, verbose=2, callbacks=[modelcheckpoint])
# %%
undersample_pred = undersample_model.predict(og_X_test, verbose=2)
# %%
# Sequential.predict_classes() was removed in TF 2.6; derive class labels
# from the softmax output already computed above (same values, no second
# forward pass).
undersample_pred_classes = np.argmax(undersample_pred, axis=1)
# %%
confmat = confusion_matrix(og_Y_test, undersample_pred_classes)
print(confmat)
# %%
def plotTensorflowConfmat(confmat, classes):
    """Render a confusion matrix as a Blues heatmap with per-cell counts.

    confmat: 2-D integer array, as returned by sklearn's confusion_matrix.
    classes: tick labels, indexed by class id.
    """
    plt.imshow(confmat, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title("Confusion Matrix")
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    plt.tight_layout()
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    # Counts are integers: format with 'd' instead of the original '.2f'
    # (which printed e.g. '56851.00'); flip the text to white on dark
    # cells so large counts stay readable against the Blues colormap.
    threshold = confmat.max() / 2.0
    for i, j in itertools.product(range(confmat.shape[0]), range(confmat.shape[1])):
        plt.text(j, i, format(confmat[i, j], 'd'),
                 horizontalalignment='center',
                 color='white' if confmat[i, j] > threshold else 'black')
# %%
# Class-id -> label mapping reused by every confusion-matrix plot.
classes = ["Normal", "Fraud"]
plotTensorflowConfmat(confmat, classes)
# %%
# Oversample the minority (fraud) class; fit on the training fold only so
# no synthetic samples leak into the test set.
sm = SMOTE(sampling_strategy="minority", random_state=42)
# fit_resample() replaces fit_sample(), removed in imbalanced-learn 0.8.
sm_X_train, sm_Y_train = sm.fit_resample(og_X_train, og_Y_train)
# %%
sm_X_train.shape
# %%
# Same architecture as the undersample model, trained on the
# SMOTE-balanced data and evaluated on the untouched original test fold.
n_inputs = sm_X_train.shape[1]
smote_model = Sequential([
    Dense(n_inputs, input_shape=(n_inputs,), activation='relu'),
    Dense(32, activation='relu'),
    Dense(2, activation='softmax')
])
# %%
smote_model.summary()
# %%
# learning_rate= replaces the deprecated lr= alias.
smote_model.compile(
    Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(
    'models/smote_model.h5', save_best_only=True, monitor='val_acc')
smote_model.fit(sm_X_train, sm_Y_train, validation_split=0.2, batch_size=25,
                epochs=20, verbose=2, shuffle=True, callbacks=[modelcheckpoint])
# %%
smote_model.save('models/smote_model.h5')
# %%
# predict_classes() was removed in TF 2.6; argmax over the softmax output
# yields the same class labels.
smote_pred_classes = np.argmax(smote_model.predict(og_X_test), axis=1)
# %%
confmat = confusion_matrix(og_Y_test, smote_pred_classes)
print(confmat)
# %%
plotTensorflowConfmat(confmat, classes)
# %%
sm2 = SMOTE(sampling_strategy="minority", random_state=42)
# %%
# Rebuild the oversampled training set as labelled DataFrames so the
# correlation / outlier analysis below can work with named columns.
# fit_resample() replaces fit_sample(), removed in imbalanced-learn 0.8.
sm2_X_train, sm2_Y_train = sm2.fit_resample(og_X_train, og_Y_train)
sm2_X_train = pd.DataFrame(sm2_X_train)
sm2_X_train.head()
# %%
sm2_Y_train = pd.DataFrame(sm2_Y_train, columns=["Class"])
sm2_Y_train.head()
# %%
smote_df = pd.concat([sm2_X_train, sm2_Y_train], axis=1)
smote_df.head()
# %%
smote_df = smote_df.sample(frac=1, random_state=42)
# %%
# Correlations on the balanced data; guides the negative_corr /
# positive_corr column choices below.
corr = smote_df.corr()
sns.heatmap(corr, cmap='coolwarm_r', annot_kws={'size': 20})
plt.show()
# %%
corr["Class"].sort_values()
# %%
# Columns most (anti-)correlated with Class, read off the heatmap above;
# columns are integer-labelled because the DataFrame was rebuilt from a
# plain array.
negative_corr = [13, 11, 9, 15]
positive_corr = [3, 10]
# %%
# Per-class box plots of the negatively correlated features.
f, axes = plt.subplots(ncols=4, figsize=(20, 4))
f.suptitle("Negative Corr")
for i, feature in enumerate(negative_corr):
    sns.boxplot(x="Class", y=feature, data=smote_df, ax=axes[i])
    axes[i].set_title(feature)
# %%
# Same view for the positively correlated features.
f, axes = plt.subplots(ncols=2, figsize=(20, 4))
f.suptitle("Positive Corr")
for i, feature in enumerate(positive_corr):
    sns.boxplot(x="Class", y=feature, data=smote_df, ax=axes[i])
    axes[i].set_title(feature)
# %%
def _drop_iqr_outliers(df, features):
    """Drop rows outside the 1.5*IQR fences of each feature, in order.

    Fences are computed from the FRAUD rows only (Class == 1), but rows of
    either class beyond them are removed, so the printed per-feature count
    (fraud-row outliers only) can understate the rows actually dropped --
    this mirrors the original copy-pasted loops exactly.  Returns the
    filtered DataFrame.
    """
    for feature in features:
        fraud_dist = df[feature].loc[df["Class"] == 1].values
        q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)
        cutoff = (q75 - q25) * 1.5
        upper_limit, lower_limit = q75 + cutoff, q25 - cutoff
        outlier_list = [x for x in fraud_dist if x <
                        lower_limit or x > upper_limit]
        df = df.drop(df[(df[feature] > upper_limit) | (
            df[feature] < lower_limit)].index)
        print(f"outliers removed {len(outlier_list)}")
    return df


# The two identical copy-pasted loops collapsed into one helper call each.
smote_df = _drop_iqr_outliers(smote_df, negative_corr)
# %%
smote_df = _drop_iqr_outliers(smote_df, positive_corr)
# %%
smote_df.shape
# %%
smote_X_train = smote_df.drop(["Class"], axis=1)
smote_Y_train = smote_df["Class"]
# %%
# Deeper network for the SMOTE data with IQR outliers removed.
n_inputs = smote_X_train.shape[1]
smote_model = Sequential([
    Dense(n_inputs, input_shape=(n_inputs,), activation='relu'),
    Dense(64, activation='relu'),
    Dense(32, activation='relu'),
    Dense(32, activation='relu'),
    Dense(2, activation='softmax')
])
# %%
smote_model.summary()
# %%
# learning_rate= replaces the deprecated lr= alias.
smote_model.compile(
    Adam(learning_rate=0.001), loss="sparse_categorical_crossentropy", metrics=["accuracy"])
modelcheckpoint = ModelCheckpoint(
    "models/smote_outliers_removed.h5", save_best_only=True, monitor="val_acc")
smote_model.fit(smote_X_train, smote_Y_train, validation_split=0.2,
                shuffle=True, batch_size=25, epochs=20, callbacks=[modelcheckpoint])
# %%
smote_model.save("models/smote_outliers_removed.h5")
# %%
# predict_classes() was removed in TF 2.6; argmax over the softmax output
# yields the same class labels.
smote_pred_classes = np.argmax(smote_model.predict(og_X_test), axis=1)
# %%
confmat = confusion_matrix(og_Y_test, smote_pred_classes)
print(confmat)
# %%
classes = ["normal", "fraud"]
plotTensorflowConfmat(confmat, classes)
# %%
|
flexible
|
{
"blob_id": "3923aed29006b4290437f2b0e11667c702da3241",
"index": 4605,
"step-1": "<mask token>\n\n\ndef plotTensorflowConfmat(confmat, classes):\n plt.imshow(confmat, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title('Confusion Matrix')\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n for i, j in itertools.product(range(confmat.shape[0]), range(confmat.\n shape[1])):\n plt.text(j, i, format(confmat[i, j], '.2f'), horizontalalignment=\n 'center', color='black')\n\n\n<mask token>\n",
"step-2": "<mask token>\ndataset.head()\ndataset.describe()\n<mask token>\ndataset.drop(['Amount', 'Time'], axis=1, inplace=True)\ndataset.head()\n<mask token>\nfor train_index, test_index in SKfold.split(X, Y):\n og_X_train, og_X_test = X.iloc[train_index], X.iloc[test_index]\n og_Y_train, og_Y_test = Y.iloc[train_index], Y.iloc[test_index]\n<mask token>\nnd_dataset.head()\n<mask token>\nundersample_model.summary()\nundersample_model.compile(Adam(lr=0.001), loss=\n 'sparse_categorical_crossentropy', metrics=['accuracy'])\n<mask token>\nundersample_model.fit(nd_Xtrain, nd_Ytrain, validation_split=0.2, epochs=20,\n batch_size=25, shuffle=True, verbose=2, callbacks=[modelcheckpoint])\n<mask token>\nprint(confmat)\n\n\ndef plotTensorflowConfmat(confmat, classes):\n plt.imshow(confmat, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title('Confusion Matrix')\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n for i, j in itertools.product(range(confmat.shape[0]), range(confmat.\n shape[1])):\n plt.text(j, i, format(confmat[i, j], '.2f'), horizontalalignment=\n 'center', color='black')\n\n\n<mask token>\nplotTensorflowConfmat(confmat, classes)\n<mask token>\nsm_X_train.shape\n<mask token>\nsmote_model.summary()\nsmote_model.compile(Adam(lr=0.001), loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n<mask token>\nsmote_model.fit(sm_X_train, sm_Y_train, validation_split=0.2, batch_size=25,\n epochs=20, verbose=2, shuffle=True, callbacks=[modelcheckpoint])\nsmote_model.save('models/smote_model.h5')\n<mask token>\nprint(confmat)\nplotTensorflowConfmat(confmat, classes)\n<mask token>\nsm2_X_train.head()\n<mask token>\nsm2_Y_train.head()\n<mask token>\nsmote_df.head()\n<mask token>\nsns.heatmap(corr, cmap='coolwarm_r', annot_kws={'size': 20})\nplt.show()\ncorr['Class'].sort_values()\n<mask 
token>\nf.suptitle('Negative Corr')\nfor i, feature in enumerate(negative_corr):\n sns.boxplot(x='Class', y=feature, data=smote_df, ax=axes[i])\n axes[i].set_title(feature)\n<mask token>\nf.suptitle('Positive Corr')\nfor i, feature in enumerate(positive_corr):\n sns.boxplot(x='Class', y=feature, data=smote_df, ax=axes[i])\n axes[i].set_title(feature)\nfor i, feature in enumerate(negative_corr):\n fraud_dist = smote_df[feature].loc[smote_df['Class'] == 1].values\n q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)\n iqr = q75 - q25\n cutoff = iqr * 1.5\n upper_limit, lower_limit = q75 + cutoff, q25 - cutoff\n outlier_list = [x for x in fraud_dist if x < lower_limit or x > upper_limit\n ]\n smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (\n smote_df[feature] < lower_limit)].index)\n print(f'outliers removed {len(outlier_list)}')\nfor i, feature in enumerate(positive_corr):\n fraud_dist = smote_df[feature].loc[smote_df['Class'] == 1].values\n q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)\n iqr = q75 - q25\n cutoff = iqr * 1.5\n upper_limit, lower_limit = q75 + cutoff, q25 - cutoff\n outlier_list = [x for x in fraud_dist if x < lower_limit or x > upper_limit\n ]\n smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (\n smote_df[feature] < lower_limit)].index)\n print(f'outliers removed {len(outlier_list)}')\nsmote_df.shape\n<mask token>\nsmote_model.summary()\nsmote_model.compile(Adam(lr=0.001), loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n<mask token>\nsmote_model.fit(smote_X_train, smote_Y_train, validation_split=0.2, shuffle\n =True, batch_size=25, epochs=20, callbacks=[modelcheckpoint])\nsmote_model.save('models/smote_outliers_removed.h5')\n<mask token>\nprint(confmat)\n<mask token>\nplotTensorflowConfmat(confmat, classes)\n",
"step-3": "<mask token>\ndataset = pd.read_csv('./dataset/creditcard.csv')\ndataset.head()\ndataset.describe()\nrobustScaler = RobustScaler()\ndataset['scaled_amount'] = robustScaler.fit_transform(dataset['Amount'].\n values.reshape(-1, 1))\ndataset['scaled_time'] = robustScaler.fit_transform(dataset['Time'].values.\n reshape(-1, 1))\ndataset.drop(['Amount', 'Time'], axis=1, inplace=True)\ndataset.head()\nX = dataset.drop(['Class'], axis=1)\nY = dataset['Class']\nSKfold = StratifiedKFold(random_state=42)\nfor train_index, test_index in SKfold.split(X, Y):\n og_X_train, og_X_test = X.iloc[train_index], X.iloc[test_index]\n og_Y_train, og_Y_test = Y.iloc[train_index], Y.iloc[test_index]\nog_X_train = og_X_train.values\nog_X_test = og_X_test.values\nog_Y_train = og_Y_train.values\nog_Y_test = og_Y_test.values\ndataset = dataset.sample(frac=1, random_state=42)\nfraud = dataset.loc[dataset['Class'] == 1]\nnormal = dataset.loc[dataset['Class'] == 0][:492]\nnd_dataset = pd.concat([fraud, normal])\nnd_dataset = nd_dataset.sample(frac=1, random_state=42)\nnd_dataset.head()\nnd_X = nd_dataset.drop('Class', axis=1)\nnd_Y = nd_dataset['Class']\nnd_Xtrain, nd_Xtest, nd_Ytrain, nd_Ytest = train_test_split(nd_X, nd_Y,\n random_state=42, test_size=0.2)\nnd_Xtrain = nd_Xtrain.values\nnd_Xtest = nd_Xtest.values\nnd_Ytrain = nd_Ytrain.values\nnd_Ytest = nd_Ytest.values\nn_inputs = nd_Xtrain.shape[1]\nundersample_model = Sequential([Dense(n_inputs, input_shape=(n_inputs,),\n activation='relu'), Dense(32, activation='relu'), Dense(2, activation=\n 'softmax')])\nundersample_model.summary()\nundersample_model.compile(Adam(lr=0.001), loss=\n 'sparse_categorical_crossentropy', metrics=['accuracy'])\nmodelcheckpoint = ModelCheckpoint('models/undersample_model.h5',\n save_best_only=True, monitor='val_acc')\nundersample_model.fit(nd_Xtrain, nd_Ytrain, validation_split=0.2, epochs=20,\n batch_size=25, shuffle=True, verbose=2, callbacks=[modelcheckpoint])\nundersample_pred = 
undersample_model.predict(og_X_test, verbose=2)\nundersample_pred_classes = undersample_model.predict_classes(og_X_test,\n verbose=2)\nconfmat = confusion_matrix(og_Y_test, undersample_pred_classes)\nprint(confmat)\n\n\ndef plotTensorflowConfmat(confmat, classes):\n plt.imshow(confmat, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title('Confusion Matrix')\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n for i, j in itertools.product(range(confmat.shape[0]), range(confmat.\n shape[1])):\n plt.text(j, i, format(confmat[i, j], '.2f'), horizontalalignment=\n 'center', color='black')\n\n\nclasses = ['Normal', 'Fraud']\nplotTensorflowConfmat(confmat, classes)\nsm = SMOTE(sampling_strategy='minority', random_state=42)\nsm_X_train, sm_Y_train = sm.fit_sample(og_X_train, og_Y_train)\nsm_X_train.shape\nn_inputs = sm_X_train.shape[1]\nsmote_model = Sequential([Dense(n_inputs, input_shape=(n_inputs,),\n activation='relu'), Dense(32, activation='relu'), Dense(2, activation=\n 'softmax')])\nsmote_model.summary()\nsmote_model.compile(Adam(lr=0.001), loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nmodelcheckpoint = ModelCheckpoint('models/smote_model.h5', save_best_only=\n True, monitor='val_acc')\nsmote_model.fit(sm_X_train, sm_Y_train, validation_split=0.2, batch_size=25,\n epochs=20, verbose=2, shuffle=True, callbacks=[modelcheckpoint])\nsmote_model.save('models/smote_model.h5')\nsmote_pred_classes = smote_model.predict_classes(og_X_test)\nconfmat = confusion_matrix(og_Y_test, smote_pred_classes)\nprint(confmat)\nplotTensorflowConfmat(confmat, classes)\nsm2 = SMOTE(sampling_strategy='minority', random_state=42)\nsm2_X_train, sm2_Y_train = sm2.fit_sample(og_X_train, og_Y_train)\nsm2_X_train = pd.DataFrame(sm2_X_train)\nsm2_X_train.head()\nsm2_Y_train = pd.DataFrame(sm2_Y_train, 
columns=['Class'])\nsm2_Y_train.head()\nsmote_df = pd.concat([sm2_X_train, sm2_Y_train], axis=1)\nsmote_df.head()\nsmote_df = smote_df.sample(frac=1, random_state=42)\ncorr = smote_df.corr()\nsns.heatmap(corr, cmap='coolwarm_r', annot_kws={'size': 20})\nplt.show()\ncorr['Class'].sort_values()\nnegative_corr = [13, 11, 9, 15]\npositive_corr = [3, 10]\nf, axes = plt.subplots(ncols=4, figsize=(20, 4))\nf.suptitle('Negative Corr')\nfor i, feature in enumerate(negative_corr):\n sns.boxplot(x='Class', y=feature, data=smote_df, ax=axes[i])\n axes[i].set_title(feature)\nf, axes = plt.subplots(ncols=2, figsize=(20, 4))\nf.suptitle('Positive Corr')\nfor i, feature in enumerate(positive_corr):\n sns.boxplot(x='Class', y=feature, data=smote_df, ax=axes[i])\n axes[i].set_title(feature)\nfor i, feature in enumerate(negative_corr):\n fraud_dist = smote_df[feature].loc[smote_df['Class'] == 1].values\n q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)\n iqr = q75 - q25\n cutoff = iqr * 1.5\n upper_limit, lower_limit = q75 + cutoff, q25 - cutoff\n outlier_list = [x for x in fraud_dist if x < lower_limit or x > upper_limit\n ]\n smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (\n smote_df[feature] < lower_limit)].index)\n print(f'outliers removed {len(outlier_list)}')\nfor i, feature in enumerate(positive_corr):\n fraud_dist = smote_df[feature].loc[smote_df['Class'] == 1].values\n q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)\n iqr = q75 - q25\n cutoff = iqr * 1.5\n upper_limit, lower_limit = q75 + cutoff, q25 - cutoff\n outlier_list = [x for x in fraud_dist if x < lower_limit or x > upper_limit\n ]\n smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (\n smote_df[feature] < lower_limit)].index)\n print(f'outliers removed {len(outlier_list)}')\nsmote_df.shape\nsmote_X_train = smote_df.drop(['Class'], axis=1)\nsmote_Y_train = smote_df['Class']\nn_inputs = smote_X_train.shape[1]\nsmote_model = 
Sequential([Dense(n_inputs, input_shape=(n_inputs,),\n activation='relu'), Dense(64, activation='relu'), Dense(32, activation=\n 'relu'), Dense(32, activation='relu'), Dense(2, activation='softmax')])\nsmote_model.summary()\nsmote_model.compile(Adam(lr=0.001), loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nmodelcheckpoint = ModelCheckpoint('models/smote_outliers_removed.h5',\n save_best_only=True, monitor='val_acc')\nsmote_model.fit(smote_X_train, smote_Y_train, validation_split=0.2, shuffle\n =True, batch_size=25, epochs=20, callbacks=[modelcheckpoint])\nsmote_model.save('models/smote_outliers_removed.h5')\nsmote_pred_classes = smote_model.predict_classes(og_X_test)\nconfmat = confusion_matrix(og_Y_test, smote_pred_classes)\nprint(confmat)\nclasses = ['normal', 'fraud']\nplotTensorflowConfmat(confmat, classes)\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA, TruncatedSVD\nimport matplotlib.patches as mpatches\nimport time\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nimport collections\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom imblearn.pipeline import make_pipeline as imbalanced_make_pipeline\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.under_sampling import NearMiss\nfrom imblearn.metrics import classification_report_imbalanced\nfrom sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score, classification_report, confusion_matrix, plot_confusion_matrix\nfrom collections import Counter\nfrom sklearn.model_selection import KFold, StratifiedKFold, train_test_split, cross_val_score, GridSearchCV, cross_val_predict\nfrom sklearn.preprocessing import RobustScaler\nfrom scipy.stats import norm\nimport keras\nfrom keras import backend as K\nfrom keras.models import Sequential\nfrom keras.layers import Activation, Dense\nfrom keras.optimizers import Adam\nfrom keras.metrics import categorical_crossentropy\nfrom keras.callbacks import ModelCheckpoint\nimport itertools\ndataset = pd.read_csv('./dataset/creditcard.csv')\ndataset.head()\ndataset.describe()\nrobustScaler = RobustScaler()\ndataset['scaled_amount'] = robustScaler.fit_transform(dataset['Amount'].\n values.reshape(-1, 1))\ndataset['scaled_time'] = robustScaler.fit_transform(dataset['Time'].values.\n reshape(-1, 1))\ndataset.drop(['Amount', 'Time'], axis=1, inplace=True)\ndataset.head()\nX = dataset.drop(['Class'], axis=1)\nY = dataset['Class']\nSKfold = 
StratifiedKFold(random_state=42)\nfor train_index, test_index in SKfold.split(X, Y):\n og_X_train, og_X_test = X.iloc[train_index], X.iloc[test_index]\n og_Y_train, og_Y_test = Y.iloc[train_index], Y.iloc[test_index]\nog_X_train = og_X_train.values\nog_X_test = og_X_test.values\nog_Y_train = og_Y_train.values\nog_Y_test = og_Y_test.values\ndataset = dataset.sample(frac=1, random_state=42)\nfraud = dataset.loc[dataset['Class'] == 1]\nnormal = dataset.loc[dataset['Class'] == 0][:492]\nnd_dataset = pd.concat([fraud, normal])\nnd_dataset = nd_dataset.sample(frac=1, random_state=42)\nnd_dataset.head()\nnd_X = nd_dataset.drop('Class', axis=1)\nnd_Y = nd_dataset['Class']\nnd_Xtrain, nd_Xtest, nd_Ytrain, nd_Ytest = train_test_split(nd_X, nd_Y,\n random_state=42, test_size=0.2)\nnd_Xtrain = nd_Xtrain.values\nnd_Xtest = nd_Xtest.values\nnd_Ytrain = nd_Ytrain.values\nnd_Ytest = nd_Ytest.values\nn_inputs = nd_Xtrain.shape[1]\nundersample_model = Sequential([Dense(n_inputs, input_shape=(n_inputs,),\n activation='relu'), Dense(32, activation='relu'), Dense(2, activation=\n 'softmax')])\nundersample_model.summary()\nundersample_model.compile(Adam(lr=0.001), loss=\n 'sparse_categorical_crossentropy', metrics=['accuracy'])\nmodelcheckpoint = ModelCheckpoint('models/undersample_model.h5',\n save_best_only=True, monitor='val_acc')\nundersample_model.fit(nd_Xtrain, nd_Ytrain, validation_split=0.2, epochs=20,\n batch_size=25, shuffle=True, verbose=2, callbacks=[modelcheckpoint])\nundersample_pred = undersample_model.predict(og_X_test, verbose=2)\nundersample_pred_classes = undersample_model.predict_classes(og_X_test,\n verbose=2)\nconfmat = confusion_matrix(og_Y_test, undersample_pred_classes)\nprint(confmat)\n\n\ndef plotTensorflowConfmat(confmat, classes):\n plt.imshow(confmat, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title('Confusion Matrix')\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, 
classes)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n for i, j in itertools.product(range(confmat.shape[0]), range(confmat.\n shape[1])):\n plt.text(j, i, format(confmat[i, j], '.2f'), horizontalalignment=\n 'center', color='black')\n\n\nclasses = ['Normal', 'Fraud']\nplotTensorflowConfmat(confmat, classes)\nsm = SMOTE(sampling_strategy='minority', random_state=42)\nsm_X_train, sm_Y_train = sm.fit_sample(og_X_train, og_Y_train)\nsm_X_train.shape\nn_inputs = sm_X_train.shape[1]\nsmote_model = Sequential([Dense(n_inputs, input_shape=(n_inputs,),\n activation='relu'), Dense(32, activation='relu'), Dense(2, activation=\n 'softmax')])\nsmote_model.summary()\nsmote_model.compile(Adam(lr=0.001), loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nmodelcheckpoint = ModelCheckpoint('models/smote_model.h5', save_best_only=\n True, monitor='val_acc')\nsmote_model.fit(sm_X_train, sm_Y_train, validation_split=0.2, batch_size=25,\n epochs=20, verbose=2, shuffle=True, callbacks=[modelcheckpoint])\nsmote_model.save('models/smote_model.h5')\nsmote_pred_classes = smote_model.predict_classes(og_X_test)\nconfmat = confusion_matrix(og_Y_test, smote_pred_classes)\nprint(confmat)\nplotTensorflowConfmat(confmat, classes)\nsm2 = SMOTE(sampling_strategy='minority', random_state=42)\nsm2_X_train, sm2_Y_train = sm2.fit_sample(og_X_train, og_Y_train)\nsm2_X_train = pd.DataFrame(sm2_X_train)\nsm2_X_train.head()\nsm2_Y_train = pd.DataFrame(sm2_Y_train, columns=['Class'])\nsm2_Y_train.head()\nsmote_df = pd.concat([sm2_X_train, sm2_Y_train], axis=1)\nsmote_df.head()\nsmote_df = smote_df.sample(frac=1, random_state=42)\ncorr = smote_df.corr()\nsns.heatmap(corr, cmap='coolwarm_r', annot_kws={'size': 20})\nplt.show()\ncorr['Class'].sort_values()\nnegative_corr = [13, 11, 9, 15]\npositive_corr = [3, 10]\nf, axes = plt.subplots(ncols=4, figsize=(20, 4))\nf.suptitle('Negative Corr')\nfor i, feature in enumerate(negative_corr):\n sns.boxplot(x='Class', 
y=feature, data=smote_df, ax=axes[i])\n axes[i].set_title(feature)\nf, axes = plt.subplots(ncols=2, figsize=(20, 4))\nf.suptitle('Positive Corr')\nfor i, feature in enumerate(positive_corr):\n sns.boxplot(x='Class', y=feature, data=smote_df, ax=axes[i])\n axes[i].set_title(feature)\nfor i, feature in enumerate(negative_corr):\n fraud_dist = smote_df[feature].loc[smote_df['Class'] == 1].values\n q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)\n iqr = q75 - q25\n cutoff = iqr * 1.5\n upper_limit, lower_limit = q75 + cutoff, q25 - cutoff\n outlier_list = [x for x in fraud_dist if x < lower_limit or x > upper_limit\n ]\n smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (\n smote_df[feature] < lower_limit)].index)\n print(f'outliers removed {len(outlier_list)}')\nfor i, feature in enumerate(positive_corr):\n fraud_dist = smote_df[feature].loc[smote_df['Class'] == 1].values\n q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)\n iqr = q75 - q25\n cutoff = iqr * 1.5\n upper_limit, lower_limit = q75 + cutoff, q25 - cutoff\n outlier_list = [x for x in fraud_dist if x < lower_limit or x > upper_limit\n ]\n smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (\n smote_df[feature] < lower_limit)].index)\n print(f'outliers removed {len(outlier_list)}')\nsmote_df.shape\nsmote_X_train = smote_df.drop(['Class'], axis=1)\nsmote_Y_train = smote_df['Class']\nn_inputs = smote_X_train.shape[1]\nsmote_model = Sequential([Dense(n_inputs, input_shape=(n_inputs,),\n activation='relu'), Dense(64, activation='relu'), Dense(32, activation=\n 'relu'), Dense(32, activation='relu'), Dense(2, activation='softmax')])\nsmote_model.summary()\nsmote_model.compile(Adam(lr=0.001), loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\nmodelcheckpoint = ModelCheckpoint('models/smote_outliers_removed.h5',\n save_best_only=True, monitor='val_acc')\nsmote_model.fit(smote_X_train, smote_Y_train, 
validation_split=0.2, shuffle\n =True, batch_size=25, epochs=20, callbacks=[modelcheckpoint])\nsmote_model.save('models/smote_outliers_removed.h5')\nsmote_pred_classes = smote_model.predict_classes(og_X_test)\nconfmat = confusion_matrix(og_Y_test, smote_pred_classes)\nprint(confmat)\nclasses = ['normal', 'fraud']\nplotTensorflowConfmat(confmat, classes)\n",
"step-5": "# %%\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA, TruncatedSVD\nimport matplotlib.patches as mpatches\nimport time\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nimport collections\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom imblearn.pipeline import make_pipeline as imbalanced_make_pipeline\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.under_sampling import NearMiss\nfrom imblearn.metrics import classification_report_imbalanced\nfrom sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score, classification_report, confusion_matrix, plot_confusion_matrix\nfrom collections import Counter\nfrom sklearn.model_selection import KFold, StratifiedKFold, train_test_split, cross_val_score, GridSearchCV, cross_val_predict\nfrom sklearn.preprocessing import RobustScaler\nfrom scipy.stats import norm\n\nimport keras\nfrom keras import backend as K\nfrom keras.models import Sequential\nfrom keras.layers import Activation, Dense\nfrom keras.optimizers import Adam\nfrom keras.metrics import categorical_crossentropy\nfrom keras.callbacks import ModelCheckpoint\n\nimport itertools\n# %%\ndataset = pd.read_csv('./dataset/creditcard.csv')\ndataset.head()\n# %%\ndataset.describe()\n# %%\nrobustScaler = RobustScaler()\ndataset['scaled_amount'] = robustScaler.fit_transform(\n dataset['Amount'].values.reshape(-1, 1))\ndataset['scaled_time'] = robustScaler.fit_transform(\n dataset['Time'].values.reshape(-1, 1))\n# %%\ndataset.drop(['Amount', 'Time'], axis=1, inplace=True)\ndataset.head()\n# %%\nX = dataset.drop(['Class'], axis=1)\nY = 
dataset['Class']\n# %%\nSKfold = StratifiedKFold(random_state=42)\nfor train_index, test_index in SKfold.split(X, Y):\n og_X_train, og_X_test = X.iloc[train_index], X.iloc[test_index]\n og_Y_train, og_Y_test = Y.iloc[train_index], Y.iloc[test_index]\n\n# %%\nog_X_train = og_X_train.values\nog_X_test = og_X_test.values\nog_Y_train = og_Y_train.values\nog_Y_test = og_Y_test.values\n# %%\ndataset = dataset.sample(frac=1, random_state=42)\nfraud = dataset.loc[dataset['Class'] == 1]\nnormal = dataset.loc[dataset['Class'] == 0][:492]\nnd_dataset = pd.concat([fraud, normal])\nnd_dataset = nd_dataset.sample(frac=1, random_state=42)\nnd_dataset.head()\n# %%\nnd_X = nd_dataset.drop(\"Class\", axis=1)\nnd_Y = nd_dataset[\"Class\"]\n\n# %%\nnd_Xtrain, nd_Xtest, nd_Ytrain, nd_Ytest = train_test_split(\n nd_X, nd_Y, random_state=42, test_size=0.2)\nnd_Xtrain = nd_Xtrain.values\nnd_Xtest = nd_Xtest.values\nnd_Ytrain = nd_Ytrain.values\nnd_Ytest = nd_Ytest.values\n\n# %%\nn_inputs = nd_Xtrain.shape[1]\nundersample_model = Sequential([\n Dense(n_inputs, input_shape=(n_inputs,), activation=\"relu\"),\n Dense(32, activation=\"relu\"),\n Dense(2, activation=\"softmax\")\n])\n# %%\nundersample_model.summary()\n# %%\nundersample_model.compile(\n Adam(lr=0.001), loss='sparse_categorical_crossentropy', metrics=[\"accuracy\"])\nmodelcheckpoint = ModelCheckpoint(\n \"models/undersample_model.h5\", save_best_only=True, monitor=\"val_acc\")\nundersample_model.fit(nd_Xtrain, nd_Ytrain, validation_split=0.2, epochs=20,\n batch_size=25, shuffle=True, verbose=2, callbacks=[modelcheckpoint])\n\n# %%\nundersample_pred = undersample_model.predict(og_X_test, verbose=2)\n# %%\nundersample_pred_classes = undersample_model.predict_classes(\n og_X_test, verbose=2)\n# %%\nconfmat = confusion_matrix(og_Y_test, undersample_pred_classes)\nprint(confmat)\n# %%\n\n\ndef plotTensorflowConfmat(confmat, classes):\n plt.imshow(confmat, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix\")\n 
plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n plt.tight_layout()\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")\n for i, j in itertools.product(range(confmat.shape[0]), range(confmat.shape[1])):\n plt.text(j, i, format(confmat[i, j], '.2f'),\n horizontalalignment='center', color='black')\n\n\n# %%\nclasses = [\"Normal\", \"Fraud\"]\nplotTensorflowConfmat(confmat, classes)\n\n# %%\nsm = SMOTE(sampling_strategy=\"minority\", random_state=42)\nsm_X_train, sm_Y_train = sm.fit_sample(og_X_train, og_Y_train)\n# %%\nsm_X_train.shape\n# %%\nn_inputs = sm_X_train.shape[1]\nsmote_model = Sequential([\n Dense(n_inputs, input_shape=(n_inputs,), activation='relu'),\n Dense(32, activation='relu'),\n Dense(2, activation='softmax')\n])\n# %%\nsmote_model.summary()\n# %%\nsmote_model.compile(\n Adam(lr=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])\nmodelcheckpoint = ModelCheckpoint(\n 'models/smote_model.h5', save_best_only=True, monitor='val_acc')\nsmote_model.fit(sm_X_train, sm_Y_train, validation_split=0.2, batch_size=25,\n epochs=20, verbose=2, shuffle=True, callbacks=[modelcheckpoint])\n# %%\nsmote_model.save('models/smote_model.h5')\n# %%\nsmote_pred_classes = smote_model.predict_classes(og_X_test)\n# %%\nconfmat = confusion_matrix(og_Y_test, smote_pred_classes)\nprint(confmat)\n# %%\nplotTensorflowConfmat(confmat, classes)\n# %%\nsm2 = SMOTE(sampling_strategy=\"minority\", random_state=42)\n# %%\nsm2_X_train, sm2_Y_train = sm2.fit_sample(og_X_train, og_Y_train)\nsm2_X_train = pd.DataFrame(sm2_X_train)\nsm2_X_train.head()\n# %%\nsm2_Y_train = pd.DataFrame(sm2_Y_train, columns=[\"Class\"])\nsm2_Y_train.head()\n# %%\nsmote_df = pd.concat([sm2_X_train, sm2_Y_train], axis=1)\nsmote_df.head()\n\n# %%\nsmote_df = smote_df.sample(frac=1, random_state=42)\n# %%\ncorr = smote_df.corr()\nsns.heatmap(corr, cmap='coolwarm_r', annot_kws={'size': 
20})\nplt.show()\n# %%\ncorr[\"Class\"].sort_values()\n# %%\nnegative_corr = [13, 11, 9, 15]\npositive_corr = [3, 10]\n# %%\nf, axes = plt.subplots(ncols=4, figsize=(20, 4))\nf.suptitle(\"Negative Corr\")\nfor i, feature in enumerate(negative_corr):\n sns.boxplot(x=\"Class\", y=feature, data=smote_df, ax=axes[i])\n axes[i].set_title(feature)\n# %%\nf, axes = plt.subplots(ncols=2, figsize=(20, 4))\nf.suptitle(\"Positive Corr\")\nfor i, feature in enumerate(positive_corr):\n sns.boxplot(x=\"Class\", y=feature, data=smote_df, ax=axes[i])\n axes[i].set_title(feature)\n# %%\nfor i, feature in enumerate(negative_corr):\n fraud_dist = smote_df[feature].loc[smote_df[\"Class\"] == 1].values\n q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)\n iqr = q75-q25\n cutoff = iqr*1.5\n upper_limit, lower_limit = q75+cutoff, q25-cutoff\n outlier_list = [x for x in fraud_dist if x <\n lower_limit or x > upper_limit]\n smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (\n smote_df[feature] < lower_limit)].index)\n print(f\"outliers removed {len(outlier_list)}\")\n\n# %%\nfor i, feature in enumerate(positive_corr):\n fraud_dist = smote_df[feature].loc[smote_df[\"Class\"] == 1].values\n q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)\n iqr = q75-q25\n cutoff = iqr*1.5\n upper_limit, lower_limit = q75+cutoff, q25-cutoff\n outlier_list = [x for x in fraud_dist if x <\n lower_limit or x > upper_limit]\n smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (\n smote_df[feature] < lower_limit)].index)\n print(f\"outliers removed {len(outlier_list)}\")\n# %%\nsmote_df.shape\n# %%\nsmote_X_train = smote_df.drop([\"Class\"], axis=1)\nsmote_Y_train = smote_df[\"Class\"]\n# %%\nn_inputs = smote_X_train.shape[1]\nsmote_model = Sequential([\n Dense(n_inputs, input_shape=(n_inputs,), activation='relu'),\n Dense(64, activation='relu'),\n Dense(32, activation='relu'),\n Dense(32, activation='relu'),\n Dense(2, 
activation='softmax')\n])\n# %%\nsmote_model.summary()\n# %%\nsmote_model.compile(\n Adam(lr=0.001), loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\nmodelcheckpoint = ModelCheckpoint(\n \"models/smote_outliers_removed.h5\", save_best_only=True, monitor=\"val_acc\")\nsmote_model.fit(smote_X_train, smote_Y_train, validation_split=0.2,\n shuffle=True, batch_size=25, epochs=20, callbacks=[modelcheckpoint])\n\n# %%\nsmote_model.save(\"models/smote_outliers_removed.h5\")\n# %%\nsmote_pred_classes = smote_model.predict_classes(og_X_test)\n# %%\nconfmat = confusion_matrix(og_Y_test, smote_pred_classes)\nprint(confmat)\n# %%\nclasses = [\"normal\", \"fraud\"]\nplotTensorflowConfmat(confmat, classes)\n# %%\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def create_meme(word):
return f'this is your meme NEW VERSION {word}'
|
flexible
|
{
"blob_id": "32b3e65add5fb44320898b682e8f94f1460a32e7",
"index": 628,
"step-1": "<mask token>\n",
"step-2": "def create_meme(word):\n return f'this is your meme NEW VERSION {word}'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(sys.argv[1], 'r') as blastOut:
geneHits = []
orgHits = []
hits = 0.0
for line in blastOut:
hits += 1.0
currHit = line.split()[1]
currGene = currHit.split('_')[-1]
currOrg = currHit.split('_')[0]
geneHits.append(currGene)
orgHits.append(currOrg)
<|reserved_special_token_0|>
for index in uniqueGenes:
if geneHits.count(index) >= 2:
multiHits.append(geneHits.count(index))
<|reserved_special_token_0|>
for index in uniqueOrgs:
if orgHits.count(index) > topCount:
topCount = orgHits.count(index)
hitCounts.append(topCount)
topOrg = index
<|reserved_special_token_0|>
print('\nGenome bin: ' + str(sys.argv[1]))
print('Completeness: ' + str(completeness) + '%')
print('Contamination: ' + str(contamination) + '%')
print('Heterogeneity: ' + str(heterogeneity) + '%\n')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(sys.argv[1], 'r') as blastOut:
geneHits = []
orgHits = []
hits = 0.0
for line in blastOut:
hits += 1.0
currHit = line.split()[1]
currGene = currHit.split('_')[-1]
currOrg = currHit.split('_')[0]
geneHits.append(currGene)
orgHits.append(currOrg)
uniqueGenes = list(set(geneHits))
multiHits = []
for index in uniqueGenes:
if geneHits.count(index) >= 2:
multiHits.append(geneHits.count(index))
contamination = float(sum(multiHits)) / hits * float(len(multiHits))
contamination = round(contamination * 100.0, 2)
uniqueGenes = float(len(uniqueGenes))
completeness = round(uniqueGenes / 73.0 * 100.0, 2)
uniqueOrgs = list(set(orgHits))
topCount = 0
hitCounts = []
topOrg = 'org'
for index in uniqueOrgs:
if orgHits.count(index) > topCount:
topCount = orgHits.count(index)
hitCounts.append(topCount)
topOrg = index
otherCount = float(hits - topCount)
uniqueOrgs = float(len(uniqueOrgs))
heterogeneity = otherCount / float(hits) * uniqueOrgs
heterogeneity = round(heterogeneity * 100.0, 2)
print('\nGenome bin: ' + str(sys.argv[1]))
print('Completeness: ' + str(completeness) + '%')
print('Contamination: ' + str(contamination) + '%')
print('Heterogeneity: ' + str(heterogeneity) + '%\n')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
with open(sys.argv[1], 'r') as blastOut:
geneHits = []
orgHits = []
hits = 0.0
for line in blastOut:
hits += 1.0
currHit = line.split()[1]
currGene = currHit.split('_')[-1]
currOrg = currHit.split('_')[0]
geneHits.append(currGene)
orgHits.append(currOrg)
uniqueGenes = list(set(geneHits))
multiHits = []
for index in uniqueGenes:
if geneHits.count(index) >= 2:
multiHits.append(geneHits.count(index))
contamination = float(sum(multiHits)) / hits * float(len(multiHits))
contamination = round(contamination * 100.0, 2)
uniqueGenes = float(len(uniqueGenes))
completeness = round(uniqueGenes / 73.0 * 100.0, 2)
uniqueOrgs = list(set(orgHits))
topCount = 0
hitCounts = []
topOrg = 'org'
for index in uniqueOrgs:
if orgHits.count(index) > topCount:
topCount = orgHits.count(index)
hitCounts.append(topCount)
topOrg = index
otherCount = float(hits - topCount)
uniqueOrgs = float(len(uniqueOrgs))
heterogeneity = otherCount / float(hits) * uniqueOrgs
heterogeneity = round(heterogeneity * 100.0, 2)
print('\nGenome bin: ' + str(sys.argv[1]))
print('Completeness: ' + str(completeness) + '%')
print('Contamination: ' + str(contamination) + '%')
print('Heterogeneity: ' + str(heterogeneity) + '%\n')
<|reserved_special_token_1|>
#!/usr/bin/python2.7
'''USAGE: completeness.py BLAST_output (tab formatted)
Prints % completeness based on marker gene BLAST of caled genes from a genome
Markers from Lan et al. (2016)
'''
import sys
with open(sys.argv[1],'r') as blastOut:
geneHits = []
orgHits = []
hits = 0.0
for line in blastOut:
hits += 1.0
currHit = line.split()[1]
currGene = currHit.split('_')[-1]
currOrg = currHit.split('_')[0]
geneHits.append(currGene)
orgHits.append(currOrg)
uniqueGenes = list(set(geneHits))
multiHits = []
for index in uniqueGenes:
if geneHits.count(index) >= 2:
multiHits.append(geneHits.count(index))
contamination = (float(sum(multiHits)) / hits) * float(len(multiHits))
contamination = round((contamination * 100.0), 2)
uniqueGenes = float(len(uniqueGenes))
completeness = round(((uniqueGenes / 73.0) * 100.0), 2)
uniqueOrgs = list(set(orgHits))
topCount = 0
hitCounts = []
topOrg = 'org'
for index in uniqueOrgs:
if orgHits.count(index) > topCount:
topCount = orgHits.count(index)
hitCounts.append(topCount)
topOrg = index
otherCount = float(hits - topCount)
uniqueOrgs = float(len(uniqueOrgs))
heterogeneity = (otherCount / float(hits)) * uniqueOrgs
heterogeneity = round((heterogeneity * 100.0), 2)
print('\nGenome bin: ' + str(sys.argv[1]))
print('Completeness: ' + str(completeness) + '%')
print('Contamination: ' + str(contamination) + '%')
print('Heterogeneity: ' + str(heterogeneity) + '%\n')
|
flexible
|
{
"blob_id": "a8659ca7d7a5870fc6f62b3dfee1779e33373e7b",
"index": 8388,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(sys.argv[1], 'r') as blastOut:\n geneHits = []\n orgHits = []\n hits = 0.0\n for line in blastOut:\n hits += 1.0\n currHit = line.split()[1]\n currGene = currHit.split('_')[-1]\n currOrg = currHit.split('_')[0]\n geneHits.append(currGene)\n orgHits.append(currOrg)\n<mask token>\nfor index in uniqueGenes:\n if geneHits.count(index) >= 2:\n multiHits.append(geneHits.count(index))\n<mask token>\nfor index in uniqueOrgs:\n if orgHits.count(index) > topCount:\n topCount = orgHits.count(index)\n hitCounts.append(topCount)\n topOrg = index\n<mask token>\nprint('\\nGenome bin: ' + str(sys.argv[1]))\nprint('Completeness: ' + str(completeness) + '%')\nprint('Contamination: ' + str(contamination) + '%')\nprint('Heterogeneity: ' + str(heterogeneity) + '%\\n')\n",
"step-3": "<mask token>\nwith open(sys.argv[1], 'r') as blastOut:\n geneHits = []\n orgHits = []\n hits = 0.0\n for line in blastOut:\n hits += 1.0\n currHit = line.split()[1]\n currGene = currHit.split('_')[-1]\n currOrg = currHit.split('_')[0]\n geneHits.append(currGene)\n orgHits.append(currOrg)\nuniqueGenes = list(set(geneHits))\nmultiHits = []\nfor index in uniqueGenes:\n if geneHits.count(index) >= 2:\n multiHits.append(geneHits.count(index))\ncontamination = float(sum(multiHits)) / hits * float(len(multiHits))\ncontamination = round(contamination * 100.0, 2)\nuniqueGenes = float(len(uniqueGenes))\ncompleteness = round(uniqueGenes / 73.0 * 100.0, 2)\nuniqueOrgs = list(set(orgHits))\ntopCount = 0\nhitCounts = []\ntopOrg = 'org'\nfor index in uniqueOrgs:\n if orgHits.count(index) > topCount:\n topCount = orgHits.count(index)\n hitCounts.append(topCount)\n topOrg = index\notherCount = float(hits - topCount)\nuniqueOrgs = float(len(uniqueOrgs))\nheterogeneity = otherCount / float(hits) * uniqueOrgs\nheterogeneity = round(heterogeneity * 100.0, 2)\nprint('\\nGenome bin: ' + str(sys.argv[1]))\nprint('Completeness: ' + str(completeness) + '%')\nprint('Contamination: ' + str(contamination) + '%')\nprint('Heterogeneity: ' + str(heterogeneity) + '%\\n')\n",
"step-4": "<mask token>\nimport sys\nwith open(sys.argv[1], 'r') as blastOut:\n geneHits = []\n orgHits = []\n hits = 0.0\n for line in blastOut:\n hits += 1.0\n currHit = line.split()[1]\n currGene = currHit.split('_')[-1]\n currOrg = currHit.split('_')[0]\n geneHits.append(currGene)\n orgHits.append(currOrg)\nuniqueGenes = list(set(geneHits))\nmultiHits = []\nfor index in uniqueGenes:\n if geneHits.count(index) >= 2:\n multiHits.append(geneHits.count(index))\ncontamination = float(sum(multiHits)) / hits * float(len(multiHits))\ncontamination = round(contamination * 100.0, 2)\nuniqueGenes = float(len(uniqueGenes))\ncompleteness = round(uniqueGenes / 73.0 * 100.0, 2)\nuniqueOrgs = list(set(orgHits))\ntopCount = 0\nhitCounts = []\ntopOrg = 'org'\nfor index in uniqueOrgs:\n if orgHits.count(index) > topCount:\n topCount = orgHits.count(index)\n hitCounts.append(topCount)\n topOrg = index\notherCount = float(hits - topCount)\nuniqueOrgs = float(len(uniqueOrgs))\nheterogeneity = otherCount / float(hits) * uniqueOrgs\nheterogeneity = round(heterogeneity * 100.0, 2)\nprint('\\nGenome bin: ' + str(sys.argv[1]))\nprint('Completeness: ' + str(completeness) + '%')\nprint('Contamination: ' + str(contamination) + '%')\nprint('Heterogeneity: ' + str(heterogeneity) + '%\\n')\n",
"step-5": "#!/usr/bin/python2.7\n'''USAGE: completeness.py BLAST_output (tab formatted)\nPrints % completeness based on marker gene BLAST of caled genes from a genome\nMarkers from Lan et al. (2016)\n'''\nimport sys\n\nwith open(sys.argv[1],'r') as blastOut:\n\n\tgeneHits = []\n\torgHits = []\n\thits = 0.0\n\tfor line in blastOut:\n\t\thits += 1.0\n\t\tcurrHit = line.split()[1]\n\t\tcurrGene = currHit.split('_')[-1]\n\t\tcurrOrg = currHit.split('_')[0]\n\t\tgeneHits.append(currGene)\n\t\torgHits.append(currOrg)\n\n\nuniqueGenes = list(set(geneHits))\nmultiHits = []\nfor index in uniqueGenes:\n\tif geneHits.count(index) >= 2:\n\t\tmultiHits.append(geneHits.count(index))\ncontamination = (float(sum(multiHits)) / hits) * float(len(multiHits))\ncontamination = round((contamination * 100.0), 2)\n\nuniqueGenes = float(len(uniqueGenes))\ncompleteness = round(((uniqueGenes / 73.0) * 100.0), 2)\n\nuniqueOrgs = list(set(orgHits))\ntopCount = 0\nhitCounts = []\ntopOrg = 'org'\nfor index in uniqueOrgs:\n\tif orgHits.count(index) > topCount:\n\t\ttopCount = orgHits.count(index)\n\t\thitCounts.append(topCount)\n\t\ttopOrg = index\n\notherCount = float(hits - topCount)\nuniqueOrgs = float(len(uniqueOrgs))\nheterogeneity = (otherCount / float(hits)) * uniqueOrgs\nheterogeneity = round((heterogeneity * 100.0), 2)\n\n\nprint('\\nGenome bin: ' + str(sys.argv[1]))\nprint('Completeness: ' + str(completeness) + '%')\nprint('Contamination: ' + str(contamination) + '%')\nprint('Heterogeneity: ' + str(heterogeneity) + '%\\n')\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def sigmoid(X):
""" Applies the logistic function to x, element-wise. """
return 1 / (1 + np.exp(-X))
def x_strich(X):
return np.column_stack((np.ones(len(X)), X))
<|reserved_special_token_0|>
def rescale_model(thetas, mean, std):
thetas_rescaled = np.zeros(thetas.shape[0])
for count, value in enumerate(thetas):
if count == 0:
thetas_rescaled[0] = value + thetas[1] * (mean / std)
return thetas_rescaled
<|reserved_special_token_0|>
def cross_entropy(X, y):
"""Implements cross-entropy as a function costs(theta) on given traning data
Args:
h: the hypothesis as function
x: features as 2D array with shape (m_examples, n_features)
y: ground truth labels for given features with shape (m_examples)
Returns:
lambda costs(theta) that models the cross-entropy for each x^i
"""
return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-09) - (
1 - y) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-09)
<|reserved_special_token_0|>
def L2_regularization_cost(X, theta, lambda_reg):
return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))
<|reserved_special_token_0|>
def mean_cross_entropy_costs(X, y, lambda_reg=0.0):
"""Implements mean cross-entropy as a function J(theta) on given traning
data
Args:
X: features as 2D array with shape (m_examples, n_features)
y: ground truth labels for given features with shape (m_examples)
hypothesis: the hypothesis as function
cost_func: cost function
Returns:
lambda J(theta) that models the mean cross-entropy
"""
return lambda theta: np.mean(cross_entropy(X, y)(theta)
) + L2_regularization_cost(X, theta, lambda_reg)
def plot_progress(fig, costs, learning_rate, lambda_reg):
"""Plots the costs over the iterations
Args:
costs: history of costs
"""
ax = fig.add_subplot(111)
ax.plot(np.arange(len(costs)), costs, alpha=0.8, label='LR: ' + str(
learning_rate) + ' __ Lambda: ' + str(lambda_reg))
ax.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='best', ncol=4,
mode='expand', borderaxespad=0.0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sigmoid(X):
""" Applies the logistic function to x, element-wise. """
return 1 / (1 + np.exp(-X))
def x_strich(X):
return np.column_stack((np.ones(len(X)), X))
<|reserved_special_token_0|>
def rescale_model(thetas, mean, std):
thetas_rescaled = np.zeros(thetas.shape[0])
for count, value in enumerate(thetas):
if count == 0:
thetas_rescaled[0] = value + thetas[1] * (mean / std)
return thetas_rescaled
<|reserved_special_token_0|>
def cross_entropy(X, y):
"""Implements cross-entropy as a function costs(theta) on given traning data
Args:
h: the hypothesis as function
x: features as 2D array with shape (m_examples, n_features)
y: ground truth labels for given features with shape (m_examples)
Returns:
lambda costs(theta) that models the cross-entropy for each x^i
"""
return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-09) - (
1 - y) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-09)
def compute_new_theta(X, y, theta, learning_rate, lambda_reg):
"""Updates learnable parameters theta
The update is done by calculating the partial derivities of
the cost function including the linear hypothesis. The
gradients scaled by a scalar are subtracted from the given
theta values.
Args:
X: 2D numpy array of x values
y: array of y values corresponding to x
theta: current theta values
learning_rate: value to scale the negative gradient
hypothesis: the hypothesis as function
Returns:
theta: Updated theta_0
"""
thetas = np.zeros(len(theta))
thetas = theta * (1 - learning_rate * (lambda_reg / len(X))
) - learning_rate / len(X) * np.sum((logistic_hypothesis(theta)(X) -
y) * x_strich(X).T, axis=1)
return thetas
def L2_regularization_cost(X, theta, lambda_reg):
return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))
def gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):
"""Minimize theta values of a logistic model based on cross-entropy cost function
Args:
X: 2D numpy array of x values
y: array of y values corresponding to x
theta: current theta values
learning_rate: value to scale the negative gradient
num_iters: number of iterations updating thetas
lambda_reg: regularization strength
Returns:
history_cost: cost after each iteration
history_theta: Updated theta values after each iteration
"""
thetas = [theta]
cost = np.zeros(num_iters)
J = mean_cross_entropy_costs(X, y, lambda_reg)
cost[0] = J(thetas[0])
for i in range(1, num_iters):
thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate,
lambda_reg))
cost[i] = J(thetas[i])
return cost, thetas
def mean_cross_entropy_costs(X, y, lambda_reg=0.0):
"""Implements mean cross-entropy as a function J(theta) on given traning
data
Args:
X: features as 2D array with shape (m_examples, n_features)
y: ground truth labels for given features with shape (m_examples)
hypothesis: the hypothesis as function
cost_func: cost function
Returns:
lambda J(theta) that models the mean cross-entropy
"""
return lambda theta: np.mean(cross_entropy(X, y)(theta)
) + L2_regularization_cost(X, theta, lambda_reg)
def plot_progress(fig, costs, learning_rate, lambda_reg):
"""Plots the costs over the iterations
Args:
costs: history of costs
"""
ax = fig.add_subplot(111)
ax.plot(np.arange(len(costs)), costs, alpha=0.8, label='LR: ' + str(
learning_rate) + ' __ Lambda: ' + str(lambda_reg))
ax.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='best', ncol=4,
mode='expand', borderaxespad=0.0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sigmoid(X):
""" Applies the logistic function to x, element-wise. """
return 1 / (1 + np.exp(-X))
def x_strich(X):
return np.column_stack((np.ones(len(X)), X))
<|reserved_special_token_0|>
def rescale_model(thetas, mean, std):
thetas_rescaled = np.zeros(thetas.shape[0])
for count, value in enumerate(thetas):
if count == 0:
thetas_rescaled[0] = value + thetas[1] * (mean / std)
return thetas_rescaled
def logistic_hypothesis(theta):
"""Combines given list argument in a logistic equation and returns it as a
function
Args:
thetas: list of coefficients
Returns:
lambda that models a logistc function based on thetas and x
"""
return lambda X: sigmoid(np.dot(x_strich(X), theta))
def cross_entropy(X, y):
"""Implements cross-entropy as a function costs(theta) on given traning data
Args:
h: the hypothesis as function
x: features as 2D array with shape (m_examples, n_features)
y: ground truth labels for given features with shape (m_examples)
Returns:
lambda costs(theta) that models the cross-entropy for each x^i
"""
return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-09) - (
1 - y) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-09)
def compute_new_theta(X, y, theta, learning_rate, lambda_reg):
"""Updates learnable parameters theta
The update is done by calculating the partial derivities of
the cost function including the linear hypothesis. The
gradients scaled by a scalar are subtracted from the given
theta values.
Args:
X: 2D numpy array of x values
y: array of y values corresponding to x
theta: current theta values
learning_rate: value to scale the negative gradient
hypothesis: the hypothesis as function
Returns:
theta: Updated theta_0
"""
thetas = np.zeros(len(theta))
thetas = theta * (1 - learning_rate * (lambda_reg / len(X))
) - learning_rate / len(X) * np.sum((logistic_hypothesis(theta)(X) -
y) * x_strich(X).T, axis=1)
return thetas
def L2_regularization_cost(X, theta, lambda_reg):
return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))
def gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):
"""Minimize theta values of a logistic model based on cross-entropy cost function
Args:
X: 2D numpy array of x values
y: array of y values corresponding to x
theta: current theta values
learning_rate: value to scale the negative gradient
num_iters: number of iterations updating thetas
lambda_reg: regularization strength
Returns:
history_cost: cost after each iteration
history_theta: Updated theta values after each iteration
"""
thetas = [theta]
cost = np.zeros(num_iters)
J = mean_cross_entropy_costs(X, y, lambda_reg)
cost[0] = J(thetas[0])
for i in range(1, num_iters):
thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate,
lambda_reg))
cost[i] = J(thetas[i])
return cost, thetas
def mean_cross_entropy_costs(X, y, lambda_reg=0.0):
"""Implements mean cross-entropy as a function J(theta) on given traning
data
Args:
X: features as 2D array with shape (m_examples, n_features)
y: ground truth labels for given features with shape (m_examples)
hypothesis: the hypothesis as function
cost_func: cost function
Returns:
lambda J(theta) that models the mean cross-entropy
"""
return lambda theta: np.mean(cross_entropy(X, y)(theta)
) + L2_regularization_cost(X, theta, lambda_reg)
def plot_progress(fig, costs, learning_rate, lambda_reg):
"""Plots the costs over the iterations
Args:
costs: history of costs
"""
ax = fig.add_subplot(111)
ax.plot(np.arange(len(costs)), costs, alpha=0.8, label='LR: ' + str(
learning_rate) + ' __ Lambda: ' + str(lambda_reg))
ax.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='best', ncol=4,
mode='expand', borderaxespad=0.0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sigmoid(X):
""" Applies the logistic function to x, element-wise. """
return 1 / (1 + np.exp(-X))
def x_strich(X):
return np.column_stack((np.ones(len(X)), X))
def feature_scaling(X):
x_mean = np.mean(X, axis=0)
x_std = np.std(X, axis=0)
return (X - x_mean) / x_std, x_mean, x_std
def rescale_model(thetas, mean, std):
thetas_rescaled = np.zeros(thetas.shape[0])
for count, value in enumerate(thetas):
if count == 0:
thetas_rescaled[0] = value + thetas[1] * (mean / std)
return thetas_rescaled
def logistic_hypothesis(theta):
"""Combines given list argument in a logistic equation and returns it as a
function
Args:
thetas: list of coefficients
Returns:
lambda that models a logistc function based on thetas and x
"""
return lambda X: sigmoid(np.dot(x_strich(X), theta))
def cross_entropy(X, y):
"""Implements cross-entropy as a function costs(theta) on given traning data
Args:
h: the hypothesis as function
x: features as 2D array with shape (m_examples, n_features)
y: ground truth labels for given features with shape (m_examples)
Returns:
lambda costs(theta) that models the cross-entropy for each x^i
"""
return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-09) - (
1 - y) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-09)
def compute_new_theta(X, y, theta, learning_rate, lambda_reg):
"""Updates learnable parameters theta
The update is done by calculating the partial derivities of
the cost function including the linear hypothesis. The
gradients scaled by a scalar are subtracted from the given
theta values.
Args:
X: 2D numpy array of x values
y: array of y values corresponding to x
theta: current theta values
learning_rate: value to scale the negative gradient
hypothesis: the hypothesis as function
Returns:
theta: Updated theta_0
"""
thetas = np.zeros(len(theta))
thetas = theta * (1 - learning_rate * (lambda_reg / len(X))
) - learning_rate / len(X) * np.sum((logistic_hypothesis(theta)(X) -
y) * x_strich(X).T, axis=1)
return thetas
def L2_regularization_cost(X, theta, lambda_reg):
return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))
def gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):
"""Minimize theta values of a logistic model based on cross-entropy cost function
Args:
X: 2D numpy array of x values
y: array of y values corresponding to x
theta: current theta values
learning_rate: value to scale the negative gradient
num_iters: number of iterations updating thetas
lambda_reg: regularization strength
Returns:
history_cost: cost after each iteration
history_theta: Updated theta values after each iteration
"""
thetas = [theta]
cost = np.zeros(num_iters)
J = mean_cross_entropy_costs(X, y, lambda_reg)
cost[0] = J(thetas[0])
for i in range(1, num_iters):
thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate,
lambda_reg))
cost[i] = J(thetas[i])
return cost, thetas
def mean_cross_entropy_costs(X, y, lambda_reg=0.0):
"""Implements mean cross-entropy as a function J(theta) on given traning
data
Args:
X: features as 2D array with shape (m_examples, n_features)
y: ground truth labels for given features with shape (m_examples)
hypothesis: the hypothesis as function
cost_func: cost function
Returns:
lambda J(theta) that models the mean cross-entropy
"""
return lambda theta: np.mean(cross_entropy(X, y)(theta)
) + L2_regularization_cost(X, theta, lambda_reg)
def plot_progress(fig, costs, learning_rate, lambda_reg):
    """Plot the cost history over the iterations on the given figure.

    Args:
        fig: matplotlib figure to draw on
        costs: history of costs, one value per iteration
        learning_rate: learning rate, shown in the legend label
        lambda_reg: regularization strength, shown in the legend label
    """
    label = 'LR: ' + str(learning_rate) + ' __ Lambda: ' + str(lambda_reg)
    axes = fig.add_subplot(111)
    axes.plot(np.arange(len(costs)), costs, alpha=0.8, label=label)
    axes.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='best', ncol=4,
                mode='expand', borderaxespad=0.0)
<|reserved_special_token_1|>
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(X):
    """Apply the logistic function 1 / (1 + e^-x) to X, element-wise."""
    return np.reciprocal(1.0 + np.exp(-X))
def x_strich(X):
    """Prepend a bias column of ones to the feature matrix X."""
    bias = np.ones((len(X), 1))
    features = np.reshape(X, (len(X), -1))
    return np.hstack((bias, features))
def feature_scaling(X):
    """Standardize X column-wise to zero mean and unit variance.

    Returns:
        (X_scaled, mean, std) so callers can undo the transform later.
    """
    mu = np.mean(X, axis=0)
    sigma = np.std(X, axis=0)
    scaled = (X - mu) / sigma
    return scaled, mu, sigma
def rescale_model(thetas, mean, std):
    """Map parameters learned on scaled features back to the original scale.

    Only the intercept (index 0) is populated: it becomes
    ``thetas[0] + thetas[1] * (mean / std)``; every other entry of the
    returned array stays 0, and the loop only ever acts on ``count == 0``.
    NOTE(review): this looks like an incomplete single-feature rescaling —
    the slope terms are never copied or divided by ``std``, and the usual
    un-standardization subtracts (rather than adds) the mean term; confirm
    the intended convention against the callers.

    Args:
        thetas: learned parameter vector (intercept first)
        mean: feature mean used during scaling  # presumably scalar — TODO confirm
        std: feature standard deviation used during scaling

    Returns:
        New array of the same length as ``thetas`` (see caveat above).
    """
    thetas_rescaled = np.zeros(thetas.shape[0])
    for count, value in enumerate(thetas):
        if count == 0:
            thetas_rescaled[0] = value + thetas[1] * (mean / std)
    return thetas_rescaled
def logistic_hypothesis(theta):
    """Build the logistic hypothesis h(X) = sigmoid(X' @ theta).

    Args:
        theta: list/array of coefficients (intercept first)

    Returns:
        A callable mapping a feature matrix X to predicted probabilities.
    """
    def hypothesis(X):
        return sigmoid(x_strich(X).dot(theta))

    return hypothesis
# def regulated_cost(X, y, theta, lambda_reg):
#
# return cross_entropy(X, y)(theta) + L2_regularization_cost(X, theta, lambda_reg)
# def cross_entropy(X, y):
# """
# Computes the cross-entropy for a single logit value and a given target class.
# Parameters
# ----------
# X : float64 or float32
# The logit
# y : int
# The target class
# Returns
# -------
# floatX
# The cross entropy value (negative log-likelihood)
# """
#
# def cost(theta):
# z = x_strich(X).dot(theta)
# mu = np.max([np.zeros(X.shape[0]), -z], axis=0)
# r1 = y * (mu + np.log(np.exp(-mu) + np.exp(-z - mu)))
# mu = np.max([np.zeros(X.shape[0]), z], axis=0)
# r2 = (1 - y) * (mu + np.log(np.exp(-mu) + np.exp(z - mu)))
# return r1 + r2
#
# return cost
def cross_entropy(X, y):
    """Build costs(theta): per-example cross-entropy on the training data.

    Args:
        X: features as 2D array with shape (m_examples, n_features)
        y: ground truth labels for given features with shape (m_examples)

    Returns:
        A callable costs(theta) giving the cross-entropy for each x^i.
        A small epsilon (1e-9) guards both logs against log(0).
    """
    def costs(theta):
        predictions = logistic_hypothesis(theta)(X)
        positive_term = -y * np.log(predictions + 1e-9)
        negative_term = (1 - y) * np.log(1 - predictions + 1e-9)
        return positive_term - negative_term

    return costs
def compute_new_theta(X, y, theta, learning_rate, lambda_reg):
    """Perform one regularized gradient-descent update of theta.

    The gradient of the cross-entropy cost is computed from the logistic
    hypothesis; theta is first shrunk by the L2 weight-decay factor and
    then moved against the gradient.

    Args:
        X: 2D numpy array of x values
        y: array of y values corresponding to x
        theta: current theta values
        learning_rate: value to scale the negative gradient
        lambda_reg: regularization strength

    Returns:
        Updated theta vector.
    """
    m = len(X)
    residuals = logistic_hypothesis(theta)(X) - y
    # Weight-decay factor applied to the current parameters.
    shrink = 1 - learning_rate * (lambda_reg / m)
    # Gradient step; x_strich(X).T broadcasts the residuals over each feature row.
    step = learning_rate / m * np.sum(residuals * x_strich(X).T, axis=1)
    return theta * shrink - step
def L2_regularization_cost(X, theta, lambda_reg):
    """Return the ridge penalty lambda/(2m) * ||theta||^2 with m = len(X)."""
    penalty = np.sum(theta ** 2)
    return penalty * (lambda_reg / (2 * len(X)))
def gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):
    """Minimize theta of a logistic model via cross-entropy gradient descent.

    Args:
        X: 2D numpy array of x values
        y: array of y values corresponding to x
        theta: initial parameter vector
        learning_rate: value to scale the negative gradient
        num_iters: number of iterations updating thetas
        lambda_reg: regularization strength

    Returns:
        cost: cost after each iteration (numpy array)
        thetas: updated theta values after each iteration (incl. start)
    """
    J = mean_cross_entropy_costs(X, y, lambda_reg)
    thetas = [theta]
    cost = np.zeros(num_iters)
    cost[0] = J(theta)
    for it in range(1, num_iters):
        next_theta = compute_new_theta(X, y, thetas[-1], learning_rate,
                                       lambda_reg)
        thetas.append(next_theta)
        cost[it] = J(next_theta)
    return cost, thetas
def mean_cross_entropy_costs(X, y, lambda_reg=0.0):
    """Build J(theta): mean cross-entropy with an L2 penalty on theta.

    Args:
        X: features as 2D array with shape (m_examples, n_features)
        y: ground truth labels for given features with shape (m_examples)
        lambda_reg: regularization strength (0.0 disables the penalty)

    Returns:
        A callable J(theta) that models the regularized mean cross-entropy.
    """
    def J(theta):
        data_term = np.mean(cross_entropy(X, y)(theta))
        reg_term = L2_regularization_cost(X, theta, lambda_reg)
        return data_term + reg_term

    return J
def plot_progress(fig, costs, learning_rate, lambda_reg):
    """Plot the cost history over the iterations on the given figure.

    Args:
        fig: matplotlib figure to draw on
        costs: history of costs
        learning_rate: learning rate, shown in the legend label
        lambda_reg: regularization strength, shown in the legend label
    """
    ax = fig.add_subplot(111)
    iterations = np.arange(len(costs))
    legend_label = "LR: " + str(learning_rate) + " __ Lambda: " + str(lambda_reg)
    ax.plot(iterations, costs, alpha=0.8, label=legend_label)
    ax.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc="best", ncol=4,
              mode="expand", borderaxespad=0.0)
|
flexible
|
{
"blob_id": "36e7398f576aa1d298a20b4d4a27a7b93e3bd992",
"index": 5482,
"step-1": "<mask token>\n\n\ndef sigmoid(X):\n \"\"\" Applies the logistic function to x, element-wise. \"\"\"\n return 1 / (1 + np.exp(-X))\n\n\ndef x_strich(X):\n return np.column_stack((np.ones(len(X)), X))\n\n\n<mask token>\n\n\ndef rescale_model(thetas, mean, std):\n thetas_rescaled = np.zeros(thetas.shape[0])\n for count, value in enumerate(thetas):\n if count == 0:\n thetas_rescaled[0] = value + thetas[1] * (mean / std)\n return thetas_rescaled\n\n\n<mask token>\n\n\ndef cross_entropy(X, y):\n \"\"\"Implements cross-entropy as a function costs(theta) on given traning data\n Args:\n h: the hypothesis as function\n x: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n Returns:\n lambda costs(theta) that models the cross-entropy for each x^i\n \"\"\"\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-09) - (\n 1 - y) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-09)\n\n\n<mask token>\n\n\ndef L2_regularization_cost(X, theta, lambda_reg):\n return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))\n\n\n<mask token>\n\n\ndef mean_cross_entropy_costs(X, y, lambda_reg=0.0):\n \"\"\"Implements mean cross-entropy as a function J(theta) on given traning\n data\n Args:\n X: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n hypothesis: the hypothesis as function\n cost_func: cost function\n Returns:\n lambda J(theta) that models the mean cross-entropy\n \"\"\"\n return lambda theta: np.mean(cross_entropy(X, y)(theta)\n ) + L2_regularization_cost(X, theta, lambda_reg)\n\n\ndef plot_progress(fig, costs, learning_rate, lambda_reg):\n \"\"\"Plots the costs over the iterations\n\n Args:\n costs: history of costs\n \"\"\"\n ax = fig.add_subplot(111)\n ax.plot(np.arange(len(costs)), costs, alpha=0.8, label='LR: ' + str(\n learning_rate) + ' __ Lambda: ' + str(lambda_reg))\n ax.legend(bbox_to_anchor=(0.0, 
1.02, 1.0, 0.102), loc='best', ncol=4,\n mode='expand', borderaxespad=0.0)\n",
"step-2": "<mask token>\n\n\ndef sigmoid(X):\n \"\"\" Applies the logistic function to x, element-wise. \"\"\"\n return 1 / (1 + np.exp(-X))\n\n\ndef x_strich(X):\n return np.column_stack((np.ones(len(X)), X))\n\n\n<mask token>\n\n\ndef rescale_model(thetas, mean, std):\n thetas_rescaled = np.zeros(thetas.shape[0])\n for count, value in enumerate(thetas):\n if count == 0:\n thetas_rescaled[0] = value + thetas[1] * (mean / std)\n return thetas_rescaled\n\n\n<mask token>\n\n\ndef cross_entropy(X, y):\n \"\"\"Implements cross-entropy as a function costs(theta) on given traning data\n Args:\n h: the hypothesis as function\n x: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n Returns:\n lambda costs(theta) that models the cross-entropy for each x^i\n \"\"\"\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-09) - (\n 1 - y) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-09)\n\n\ndef compute_new_theta(X, y, theta, learning_rate, lambda_reg):\n \"\"\"Updates learnable parameters theta\n The update is done by calculating the partial derivities of\n the cost function including the linear hypothesis. 
The\n gradients scaled by a scalar are subtracted from the given\n theta values.\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n hypothesis: the hypothesis as function\n Returns:\n theta: Updated theta_0\n \"\"\"\n thetas = np.zeros(len(theta))\n thetas = theta * (1 - learning_rate * (lambda_reg / len(X))\n ) - learning_rate / len(X) * np.sum((logistic_hypothesis(theta)(X) -\n y) * x_strich(X).T, axis=1)\n return thetas\n\n\ndef L2_regularization_cost(X, theta, lambda_reg):\n return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))\n\n\ndef gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):\n \"\"\"Minimize theta values of a logistic model based on cross-entropy cost function\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n num_iters: number of iterations updating thetas\n lambda_reg: regularization strength\n Returns:\n history_cost: cost after each iteration\n history_theta: Updated theta values after each iteration\n \"\"\"\n thetas = [theta]\n cost = np.zeros(num_iters)\n J = mean_cross_entropy_costs(X, y, lambda_reg)\n cost[0] = J(thetas[0])\n for i in range(1, num_iters):\n thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate,\n lambda_reg))\n cost[i] = J(thetas[i])\n return cost, thetas\n\n\ndef mean_cross_entropy_costs(X, y, lambda_reg=0.0):\n \"\"\"Implements mean cross-entropy as a function J(theta) on given traning\n data\n Args:\n X: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n hypothesis: the hypothesis as function\n cost_func: cost function\n Returns:\n lambda J(theta) that models the mean cross-entropy\n \"\"\"\n return lambda theta: np.mean(cross_entropy(X, y)(theta)\n ) + L2_regularization_cost(X, 
theta, lambda_reg)\n\n\ndef plot_progress(fig, costs, learning_rate, lambda_reg):\n \"\"\"Plots the costs over the iterations\n\n Args:\n costs: history of costs\n \"\"\"\n ax = fig.add_subplot(111)\n ax.plot(np.arange(len(costs)), costs, alpha=0.8, label='LR: ' + str(\n learning_rate) + ' __ Lambda: ' + str(lambda_reg))\n ax.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='best', ncol=4,\n mode='expand', borderaxespad=0.0)\n",
"step-3": "<mask token>\n\n\ndef sigmoid(X):\n \"\"\" Applies the logistic function to x, element-wise. \"\"\"\n return 1 / (1 + np.exp(-X))\n\n\ndef x_strich(X):\n return np.column_stack((np.ones(len(X)), X))\n\n\n<mask token>\n\n\ndef rescale_model(thetas, mean, std):\n thetas_rescaled = np.zeros(thetas.shape[0])\n for count, value in enumerate(thetas):\n if count == 0:\n thetas_rescaled[0] = value + thetas[1] * (mean / std)\n return thetas_rescaled\n\n\ndef logistic_hypothesis(theta):\n \"\"\"Combines given list argument in a logistic equation and returns it as a\n function\n Args:\n thetas: list of coefficients\n Returns:\n lambda that models a logistc function based on thetas and x\n \"\"\"\n return lambda X: sigmoid(np.dot(x_strich(X), theta))\n\n\ndef cross_entropy(X, y):\n \"\"\"Implements cross-entropy as a function costs(theta) on given traning data\n Args:\n h: the hypothesis as function\n x: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n Returns:\n lambda costs(theta) that models the cross-entropy for each x^i\n \"\"\"\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-09) - (\n 1 - y) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-09)\n\n\ndef compute_new_theta(X, y, theta, learning_rate, lambda_reg):\n \"\"\"Updates learnable parameters theta\n The update is done by calculating the partial derivities of\n the cost function including the linear hypothesis. 
The\n gradients scaled by a scalar are subtracted from the given\n theta values.\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n hypothesis: the hypothesis as function\n Returns:\n theta: Updated theta_0\n \"\"\"\n thetas = np.zeros(len(theta))\n thetas = theta * (1 - learning_rate * (lambda_reg / len(X))\n ) - learning_rate / len(X) * np.sum((logistic_hypothesis(theta)(X) -\n y) * x_strich(X).T, axis=1)\n return thetas\n\n\ndef L2_regularization_cost(X, theta, lambda_reg):\n return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))\n\n\ndef gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):\n \"\"\"Minimize theta values of a logistic model based on cross-entropy cost function\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n num_iters: number of iterations updating thetas\n lambda_reg: regularization strength\n Returns:\n history_cost: cost after each iteration\n history_theta: Updated theta values after each iteration\n \"\"\"\n thetas = [theta]\n cost = np.zeros(num_iters)\n J = mean_cross_entropy_costs(X, y, lambda_reg)\n cost[0] = J(thetas[0])\n for i in range(1, num_iters):\n thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate,\n lambda_reg))\n cost[i] = J(thetas[i])\n return cost, thetas\n\n\ndef mean_cross_entropy_costs(X, y, lambda_reg=0.0):\n \"\"\"Implements mean cross-entropy as a function J(theta) on given traning\n data\n Args:\n X: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n hypothesis: the hypothesis as function\n cost_func: cost function\n Returns:\n lambda J(theta) that models the mean cross-entropy\n \"\"\"\n return lambda theta: np.mean(cross_entropy(X, y)(theta)\n ) + L2_regularization_cost(X, 
theta, lambda_reg)\n\n\ndef plot_progress(fig, costs, learning_rate, lambda_reg):\n \"\"\"Plots the costs over the iterations\n\n Args:\n costs: history of costs\n \"\"\"\n ax = fig.add_subplot(111)\n ax.plot(np.arange(len(costs)), costs, alpha=0.8, label='LR: ' + str(\n learning_rate) + ' __ Lambda: ' + str(lambda_reg))\n ax.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='best', ncol=4,\n mode='expand', borderaxespad=0.0)\n",
"step-4": "<mask token>\n\n\ndef sigmoid(X):\n \"\"\" Applies the logistic function to x, element-wise. \"\"\"\n return 1 / (1 + np.exp(-X))\n\n\ndef x_strich(X):\n return np.column_stack((np.ones(len(X)), X))\n\n\ndef feature_scaling(X):\n x_mean = np.mean(X, axis=0)\n x_std = np.std(X, axis=0)\n return (X - x_mean) / x_std, x_mean, x_std\n\n\ndef rescale_model(thetas, mean, std):\n thetas_rescaled = np.zeros(thetas.shape[0])\n for count, value in enumerate(thetas):\n if count == 0:\n thetas_rescaled[0] = value + thetas[1] * (mean / std)\n return thetas_rescaled\n\n\ndef logistic_hypothesis(theta):\n \"\"\"Combines given list argument in a logistic equation and returns it as a\n function\n Args:\n thetas: list of coefficients\n Returns:\n lambda that models a logistc function based on thetas and x\n \"\"\"\n return lambda X: sigmoid(np.dot(x_strich(X), theta))\n\n\ndef cross_entropy(X, y):\n \"\"\"Implements cross-entropy as a function costs(theta) on given traning data\n Args:\n h: the hypothesis as function\n x: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n Returns:\n lambda costs(theta) that models the cross-entropy for each x^i\n \"\"\"\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-09) - (\n 1 - y) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-09)\n\n\ndef compute_new_theta(X, y, theta, learning_rate, lambda_reg):\n \"\"\"Updates learnable parameters theta\n The update is done by calculating the partial derivities of\n the cost function including the linear hypothesis. 
The\n gradients scaled by a scalar are subtracted from the given\n theta values.\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n hypothesis: the hypothesis as function\n Returns:\n theta: Updated theta_0\n \"\"\"\n thetas = np.zeros(len(theta))\n thetas = theta * (1 - learning_rate * (lambda_reg / len(X))\n ) - learning_rate / len(X) * np.sum((logistic_hypothesis(theta)(X) -\n y) * x_strich(X).T, axis=1)\n return thetas\n\n\ndef L2_regularization_cost(X, theta, lambda_reg):\n return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))\n\n\ndef gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):\n \"\"\"Minimize theta values of a logistic model based on cross-entropy cost function\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n num_iters: number of iterations updating thetas\n lambda_reg: regularization strength\n Returns:\n history_cost: cost after each iteration\n history_theta: Updated theta values after each iteration\n \"\"\"\n thetas = [theta]\n cost = np.zeros(num_iters)\n J = mean_cross_entropy_costs(X, y, lambda_reg)\n cost[0] = J(thetas[0])\n for i in range(1, num_iters):\n thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate,\n lambda_reg))\n cost[i] = J(thetas[i])\n return cost, thetas\n\n\ndef mean_cross_entropy_costs(X, y, lambda_reg=0.0):\n \"\"\"Implements mean cross-entropy as a function J(theta) on given traning\n data\n Args:\n X: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n hypothesis: the hypothesis as function\n cost_func: cost function\n Returns:\n lambda J(theta) that models the mean cross-entropy\n \"\"\"\n return lambda theta: np.mean(cross_entropy(X, y)(theta)\n ) + L2_regularization_cost(X, 
theta, lambda_reg)\n\n\ndef plot_progress(fig, costs, learning_rate, lambda_reg):\n \"\"\"Plots the costs over the iterations\n\n Args:\n costs: history of costs\n \"\"\"\n ax = fig.add_subplot(111)\n ax.plot(np.arange(len(costs)), costs, alpha=0.8, label='LR: ' + str(\n learning_rate) + ' __ Lambda: ' + str(lambda_reg))\n ax.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='best', ncol=4,\n mode='expand', borderaxespad=0.0)\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef sigmoid(X):\n \"\"\" Applies the logistic function to x, element-wise. \"\"\"\n return 1 / (1 + np.exp(-X))\n\n\ndef x_strich(X):\n\n return np.column_stack((np.ones(len(X)), X))\n\n\ndef feature_scaling(X):\n x_mean = np.mean(X, axis=0)\n x_std = np.std(X, axis=0)\n\n return (X - x_mean) / x_std, x_mean, x_std\n\n\ndef rescale_model(thetas, mean, std):\n thetas_rescaled = np.zeros(thetas.shape[0])\n for count, value in enumerate(thetas):\n if count == 0:\n thetas_rescaled[0] = value + thetas[1] * (mean / std)\n return thetas_rescaled\n\n\ndef logistic_hypothesis(theta):\n \"\"\"Combines given list argument in a logistic equation and returns it as a\n function\n Args:\n thetas: list of coefficients\n Returns:\n lambda that models a logistc function based on thetas and x\n \"\"\"\n return lambda X: sigmoid(np.dot(x_strich(X), theta))\n\n\n# def regulated_cost(X, y, theta, lambda_reg):\n#\n# return cross_entropy(X, y)(theta) + L2_regularization_cost(X, theta, lambda_reg)\n\n\n# def cross_entropy(X, y):\n# \"\"\"\n# Computes the cross-entropy for a single logit value and a given target class.\n# Parameters\n# ----------\n# X : float64 or float32\n# The logit\n# y : int\n# The target class\n# Returns\n# -------\n# floatX\n# The cross entropy value (negative log-likelihood)\n# \"\"\"\n#\n# def cost(theta):\n# z = x_strich(X).dot(theta)\n# mu = np.max([np.zeros(X.shape[0]), -z], axis=0)\n# r1 = y * (mu + np.log(np.exp(-mu) + np.exp(-z - mu)))\n# mu = np.max([np.zeros(X.shape[0]), z], axis=0)\n# r2 = (1 - y) * (mu + np.log(np.exp(-mu) + np.exp(z - mu)))\n# return r1 + r2\n#\n# return cost\n\n\ndef cross_entropy(X, y):\n \"\"\"Implements cross-entropy as a function costs(theta) on given traning data\n Args:\n h: the hypothesis as function\n x: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n Returns:\n lambda costs(theta) 
that models the cross-entropy for each x^i\n \"\"\"\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-9) - (\n 1 - y\n ) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-9)\n\n\ndef compute_new_theta(X, y, theta, learning_rate, lambda_reg):\n \"\"\"Updates learnable parameters theta\n The update is done by calculating the partial derivities of\n the cost function including the linear hypothesis. The\n gradients scaled by a scalar are subtracted from the given\n theta values.\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n hypothesis: the hypothesis as function\n Returns:\n theta: Updated theta_0\n \"\"\"\n\n thetas = np.zeros(len(theta))\n thetas = theta * (1 - learning_rate * (lambda_reg / len(X))) - (\n learning_rate / len(X)\n ) * np.sum((logistic_hypothesis(theta)(X) - y) * x_strich(X).T, axis=1)\n\n return thetas\n\n\ndef L2_regularization_cost(X, theta, lambda_reg):\n return np.sum(theta ** 2) * (lambda_reg / (2 * len(X)))\n\n\ndef gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):\n \"\"\"Minimize theta values of a logistic model based on cross-entropy cost function\n Args:\n X: 2D numpy array of x values\n y: array of y values corresponding to x\n theta: current theta values\n learning_rate: value to scale the negative gradient\n num_iters: number of iterations updating thetas\n lambda_reg: regularization strength\n Returns:\n history_cost: cost after each iteration\n history_theta: Updated theta values after each iteration\n \"\"\"\n thetas = [theta]\n cost = np.zeros(num_iters)\n\n J = mean_cross_entropy_costs(X, y, lambda_reg)\n cost[0] = J(thetas[0])\n for i in range(1, num_iters):\n thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate, lambda_reg))\n cost[i] = J(thetas[i])\n return cost, thetas\n\n\ndef mean_cross_entropy_costs(X, y, lambda_reg=0.0):\n \"\"\"Implements mean 
cross-entropy as a function J(theta) on given traning\n data\n Args:\n X: features as 2D array with shape (m_examples, n_features)\n y: ground truth labels for given features with shape (m_examples)\n hypothesis: the hypothesis as function\n cost_func: cost function\n Returns:\n lambda J(theta) that models the mean cross-entropy\n \"\"\"\n return lambda theta: np.mean(cross_entropy(X, y)(theta)) + L2_regularization_cost(\n X, theta, lambda_reg\n )\n\n\ndef plot_progress(fig, costs, learning_rate, lambda_reg):\n \"\"\"Plots the costs over the iterations\n\n Args:\n costs: history of costs\n \"\"\"\n ax = fig.add_subplot(111)\n ax.plot(\n np.arange(len(costs)),\n costs,\n alpha=0.8,\n label=\"LR: \" + str(learning_rate) + \" __ Lambda: \" + str(lambda_reg),\n )\n\n ax.legend(\n bbox_to_anchor=(0.0, 1.02, 1.0, 0.102),\n loc=\"best\",\n ncol=4,\n mode=\"expand\",\n borderaxespad=0.0,\n )",
"step-ids": [
7,
9,
10,
11,
13
]
}
|
[
7,
9,
10,
11,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Bottom-up fill: dp_table[i] = fewest operations (-1, /2, /3) to reduce i to 1.
# NOTE(review): N and dp_table are defined earlier in the file (not shown here).
for i in range(4, N + 1):
    two_per = 10 ** 6  # sentinel: "/2 not applicable"; never wins the min below
    three_per = 10 ** 6  # sentinel: "/3 not applicable"
    if i % 3 == 0:
        three_per = dp_table[i // 3] + 1  # one division-by-3 step
    if i % 2 == 0:
        two_per = dp_table[i // 2] + 1  # one division-by-2 step
    minus = dp_table[i - 1] + 1  # subtracting 1 is always allowed
    dp_table[i] = min(minus, two_per, three_per)
print(dp_table[N])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Read the target number from stdin (sys is imported earlier in the file).
N = int(sys.stdin.readline())
# dp_table[i] = fewest operations (-1, /2, /3) needed to reduce i to 1.
dp_table = [(0) for _ in range(10 ** 6 + 1)]
dp_table[2], dp_table[3] = 1, 1
for i in range(4, N + 1):
    two_per = 10 ** 6  # sentinel: division by 2 not applicable for this i
    three_per = 10 ** 6  # sentinel: division by 3 not applicable for this i
    if i % 3 == 0:
        three_per = dp_table[i // 3] + 1
    if i % 2 == 0:
        two_per = dp_table[i // 2] + 1
    minus = dp_table[i - 1] + 1  # the "-1" step is always possible
    dp_table[i] = min(minus, two_per, three_per)
print(dp_table[N])
<|reserved_special_token_1|>
import sys

N = int(sys.stdin.readline())

# dp_table[n] holds the fewest operations (-1, /2, /3) needed to turn n into 1.
dp_table = [0] * (10 ** 6 + 1)
dp_table[2] = 1
dp_table[3] = 1

for num in range(4, N + 1):
    best = dp_table[num - 1] + 1  # subtracting 1 is always possible
    if num % 2 == 0:
        best = min(best, dp_table[num // 2] + 1)
    if num % 3 == 0:
        best = min(best, dp_table[num // 3] + 1)
    dp_table[num] = best

print(dp_table[N])
<|reserved_special_token_1|>
# "Make it one": reduce N to 1 using /3, /2 or -1 in the fewest steps.
import sys

N = int(sys.stdin.readline())

# dp_table[i] = minimum number of operations to reduce i down to 1.
dp_table = [0] * (10 ** 6 + 1)
dp_table[2], dp_table[3] = 1, 1

for current in range(4, N + 1):
    options = [dp_table[current - 1] + 1]  # the "-1" step always applies
    if current % 2 == 0:
        options.append(dp_table[current // 2] + 1)
    if current % 3 == 0:
        options.append(dp_table[current // 3] + 1)
    dp_table[current] = min(options)

print(dp_table[N])
|
flexible
|
{
"blob_id": "34a8fc38ed875e1c564f535348dc0d5d88c76ab1",
"index": 7281,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(4, N + 1):\n two_per = 10 ** 6\n three_per = 10 ** 6\n if i % 3 == 0:\n three_per = dp_table[i // 3] + 1\n if i % 2 == 0:\n two_per = dp_table[i // 2] + 1\n minus = dp_table[i - 1] + 1\n dp_table[i] = min(minus, two_per, three_per)\nprint(dp_table[N])\n",
"step-3": "<mask token>\nN = int(sys.stdin.readline())\ndp_table = [(0) for _ in range(10 ** 6 + 1)]\ndp_table[2], dp_table[3] = 1, 1\nfor i in range(4, N + 1):\n two_per = 10 ** 6\n three_per = 10 ** 6\n if i % 3 == 0:\n three_per = dp_table[i // 3] + 1\n if i % 2 == 0:\n two_per = dp_table[i // 2] + 1\n minus = dp_table[i - 1] + 1\n dp_table[i] = min(minus, two_per, three_per)\nprint(dp_table[N])\n",
"step-4": "import sys\nN = int(sys.stdin.readline())\ndp_table = [(0) for _ in range(10 ** 6 + 1)]\ndp_table[2], dp_table[3] = 1, 1\nfor i in range(4, N + 1):\n two_per = 10 ** 6\n three_per = 10 ** 6\n if i % 3 == 0:\n three_per = dp_table[i // 3] + 1\n if i % 2 == 0:\n two_per = dp_table[i // 2] + 1\n minus = dp_table[i - 1] + 1\n dp_table[i] = min(minus, two_per, three_per)\nprint(dp_table[N])\n",
"step-5": "# 1로 만들기\nimport sys\nN = int(sys.stdin.readline())\ndp_table = [0 for _ in range(10**6 + 1)]\ndp_table[2], dp_table[3] = 1, 1\n\nfor i in range(4,N+1):\n two_per = 10**6\n three_per = 10**6\n if i % 3 ==0:\n three_per = dp_table[i//3] + 1\n if i % 2 ==0:\n two_per = dp_table[i//2] + 1\n minus = dp_table[i-1] + 1\n dp_table[i] = min(minus, two_per, three_per)\n # print(i, dp_table[i])\n\nprint(dp_table[N])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from bs4 import BeautifulSoup
import re
class Rules:
    """Registry of accessibility checks over a BeautifulSoup-parsed DOM.

    Each ``ruleN`` method takes a parsed document (``dom``) and returns a
    list of offending elements for that rule; ``getRule`` looks a checker
    up by its string id.
    """

    def __init__(self):
        # Maps rule ids (strings "1".."10") to their checker methods.
        self.ruleCollection = {
            "1" : self.rule1,
            "2" : self.rule2,
            "3" : self.rule3,
            "4" : self.rule4,
            "5" : self.rule5,
            "6" : self.rule6,
            "7" : self.rule7,
            "8" : self.rule8,
            "9" : self.rule9,
            "10" : self.rule10,
        }

    def getRule(self, id):
        """Return the checker registered under *id* (raises KeyError if absent)."""
        return self.ruleCollection[id]

    # Rule 1: images without text alternatives (missing ``alt`` attribute).
    def rule1(self,dom):
        return dom.find_all(self._img_without_alt)

    # Rule 2: embedded multimedia without a <noembed> fallback (text or audio).
    def rule2(self,dom):
        video_arr = []
        for embed in dom.find_all("embed"):
            # ``embed.noembed`` is the first <noembed> descendant, or None.
            if not embed.noembed:
                video_arr.append(embed)
        return video_arr

    # Rule 3: colour used as a cue (inline <font color> / styled <span>).
    # NOTE(review): very coarse — flags every styled <span>, not only
    # colour styles, and ignores colours defined in external CSS.
    # Original author note: "This rule needs to be improved".
    def rule3(self,dom):
        clrcue_arr = []
        for fnt in dom.find_all('font'):
            if fnt.has_attr('color'):
                clrcue_arr.append(fnt)
        for spn in dom.find_all('span'):
            if spn.has_attr('style'):
                clrcue_arr.append(spn)
        return clrcue_arr

    # Rule 4: tables without a ``summary`` attribute.
    def rule4(self,dom):
        return dom.find_all(self._tbl_without_summ)

    # Rule 5: tables without a <caption> child.
    def rule5(self,dom):
        twcap_arr = [];
        for tb in dom.find_all("table"):
            if not tb.caption:
                twcap_arr.append(tb)
        return twcap_arr

    # Rule 6: form inputs that have no <label for="..."> pointing at them.
    def rule6(self,dom):
        lbl_arr = [];
        inputElems =[]
        inputElems.extend(dom.find_all(["textarea", "select"]))
        inputElems.extend(dom.find_all(type=["text","password", "checkbox", "radio", "file"]))
        labels = dom.find_all('label')
        for input in inputElems:  # NOTE: ``input`` shadows the builtin
            hasLabel = False
            if input.has_attr('id'):
                id = input['id']
                for lbl in labels:
                    if lbl.has_attr("for") and lbl['for'] == id:
                        hasLabel = True
                        break
            if not hasLabel:
                lbl_arr.append(input)
        return lbl_arr

    # Rule 7: elements with a double-click handler but no keyboard handler.
    # NOTE(review): ``onkeypress = False`` matches tags that *lack* an
    # onkeypress attribute — confirm that is the intended filter.
    def rule7(self,dom):
        dblclk_arr = []
        dblclk_arr = dom.find_all(ondblclick = True, onkeypress = False)
        return dblclk_arr

    # Rule 8: document without a <title>; reports [<head>] as the culprit.
    def rule8(self,dom):
        title_arr = []
        isTitle = dom.find('title')
        if isTitle is None:
            title_arr.append(dom.find('head'))
        return title_arr

    # Rule 9: anchors whose content does not look like a raw URL.
    # NOTE(review): ``url_tags`` is built but never used, and
    # ``'http' in link`` tests membership on the Tag itself (its children),
    # not on its href or text — verify this does what the rule intends.
    def rule9(self,dom):
        link_arr = []
        url_tags = ['http', 'https', '://www.' , 'www' ]
        for link in dom.find_all('a'):
            if not ('http' in link or 'https' in link or '://www.' in link or 'www' in link):
                link_arr.append(link)
        return link_arr

    # Rule 10: clickable/keyed elements missing an explicit tabindex.
    # NOTE(review): the second positional argument 'input' lands in
    # find_all's ``attrs`` slot (matching a CSS class), not as a second
    # tag name — confirm against bs4's find_all signature.
    def rule10(self,dom):
        tab_arr = []
        for tab in dom.find_all('a', 'input', ondblclick = True, onkeydown = True, onkeypress = True):
            if not tab.has_attr('tabindex'):
                tab_arr.append(tab)
        return tab_arr

    def _img_without_alt(self,tag):
        """Match <img> tags that lack an ``alt`` attribute."""
        return tag.name == "img" and not tag.has_attr("alt")

    def _tbl_without_summ(self,tag):
        """Match <table> tags that lack a ``summary`` attribute."""
        return tag.name == "table" and not tag.has_attr("summary")
#for testing
|
normal
|
{
"blob_id": "7747cbb1a1ed191b616b0d1bcfd51cdea05067f5",
"index": 5954,
"step-1": "<mask token>\n\n\nclass Rules:\n\n def __init__(self):\n self.ruleCollection = {'1': self.rule1, '2': self.rule2, '3': self.\n rule3, '4': self.rule4, '5': self.rule5, '6': self.rule6, '7':\n self.rule7, '8': self.rule8, '9': self.rule9, '10': self.rule10}\n <mask token>\n\n def rule1(self, dom):\n return dom.find_all(self._img_without_alt)\n <mask token>\n\n def rule3(self, dom):\n clrcue_arr = []\n for fnt in dom.find_all('font'):\n if fnt.has_attr('color'):\n clrcue_arr.append(fnt)\n for spn in dom.find_all('span'):\n if spn.has_attr('style'):\n clrcue_arr.append(spn)\n return clrcue_arr\n <mask token>\n <mask token>\n\n def rule6(self, dom):\n lbl_arr = []\n inputElems = []\n inputElems.extend(dom.find_all(['textarea', 'select']))\n inputElems.extend(dom.find_all(type=['text', 'password', 'checkbox',\n 'radio', 'file']))\n labels = dom.find_all('label')\n for input in inputElems:\n hasLabel = False\n if input.has_attr('id'):\n id = input['id']\n for lbl in labels:\n if lbl.has_attr('for') and lbl['for'] == id:\n hasLabel = True\n break\n if not hasLabel:\n lbl_arr.append(input)\n return lbl_arr\n <mask token>\n\n def rule8(self, dom):\n title_arr = []\n isTitle = dom.find('title')\n if isTitle is None:\n title_arr.append(dom.find('head'))\n return title_arr\n <mask token>\n <mask token>\n\n def _img_without_alt(self, tag):\n return tag.name == 'img' and not tag.has_attr('alt')\n\n def _tbl_without_summ(self, tag):\n return tag.name == 'table' and not tag.has_attr('summary')\n",
"step-2": "<mask token>\n\n\nclass Rules:\n\n def __init__(self):\n self.ruleCollection = {'1': self.rule1, '2': self.rule2, '3': self.\n rule3, '4': self.rule4, '5': self.rule5, '6': self.rule6, '7':\n self.rule7, '8': self.rule8, '9': self.rule9, '10': self.rule10}\n\n def getRule(self, id):\n return self.ruleCollection[id]\n\n def rule1(self, dom):\n return dom.find_all(self._img_without_alt)\n <mask token>\n\n def rule3(self, dom):\n clrcue_arr = []\n for fnt in dom.find_all('font'):\n if fnt.has_attr('color'):\n clrcue_arr.append(fnt)\n for spn in dom.find_all('span'):\n if spn.has_attr('style'):\n clrcue_arr.append(spn)\n return clrcue_arr\n <mask token>\n <mask token>\n\n def rule6(self, dom):\n lbl_arr = []\n inputElems = []\n inputElems.extend(dom.find_all(['textarea', 'select']))\n inputElems.extend(dom.find_all(type=['text', 'password', 'checkbox',\n 'radio', 'file']))\n labels = dom.find_all('label')\n for input in inputElems:\n hasLabel = False\n if input.has_attr('id'):\n id = input['id']\n for lbl in labels:\n if lbl.has_attr('for') and lbl['for'] == id:\n hasLabel = True\n break\n if not hasLabel:\n lbl_arr.append(input)\n return lbl_arr\n\n def rule7(self, dom):\n dblclk_arr = []\n dblclk_arr = dom.find_all(ondblclick=True, onkeypress=False)\n return dblclk_arr\n\n def rule8(self, dom):\n title_arr = []\n isTitle = dom.find('title')\n if isTitle is None:\n title_arr.append(dom.find('head'))\n return title_arr\n\n def rule9(self, dom):\n link_arr = []\n url_tags = ['http', 'https', '://www.', 'www']\n for link in dom.find_all('a'):\n if not ('http' in link or 'https' in link or '://www.' in link or\n 'www' in link):\n link_arr.append(link)\n return link_arr\n <mask token>\n\n def _img_without_alt(self, tag):\n return tag.name == 'img' and not tag.has_attr('alt')\n\n def _tbl_without_summ(self, tag):\n return tag.name == 'table' and not tag.has_attr('summary')\n",
"step-3": "<mask token>\n\n\nclass Rules:\n\n def __init__(self):\n self.ruleCollection = {'1': self.rule1, '2': self.rule2, '3': self.\n rule3, '4': self.rule4, '5': self.rule5, '6': self.rule6, '7':\n self.rule7, '8': self.rule8, '9': self.rule9, '10': self.rule10}\n\n def getRule(self, id):\n return self.ruleCollection[id]\n\n def rule1(self, dom):\n return dom.find_all(self._img_without_alt)\n\n def rule2(self, dom):\n video_arr = []\n for embed in dom.find_all('embed'):\n if not embed.noembed:\n video_arr.append(embed)\n return video_arr\n\n def rule3(self, dom):\n clrcue_arr = []\n for fnt in dom.find_all('font'):\n if fnt.has_attr('color'):\n clrcue_arr.append(fnt)\n for spn in dom.find_all('span'):\n if spn.has_attr('style'):\n clrcue_arr.append(spn)\n return clrcue_arr\n <mask token>\n\n def rule5(self, dom):\n twcap_arr = []\n for tb in dom.find_all('table'):\n if not tb.caption:\n twcap_arr.append(tb)\n return twcap_arr\n\n def rule6(self, dom):\n lbl_arr = []\n inputElems = []\n inputElems.extend(dom.find_all(['textarea', 'select']))\n inputElems.extend(dom.find_all(type=['text', 'password', 'checkbox',\n 'radio', 'file']))\n labels = dom.find_all('label')\n for input in inputElems:\n hasLabel = False\n if input.has_attr('id'):\n id = input['id']\n for lbl in labels:\n if lbl.has_attr('for') and lbl['for'] == id:\n hasLabel = True\n break\n if not hasLabel:\n lbl_arr.append(input)\n return lbl_arr\n\n def rule7(self, dom):\n dblclk_arr = []\n dblclk_arr = dom.find_all(ondblclick=True, onkeypress=False)\n return dblclk_arr\n\n def rule8(self, dom):\n title_arr = []\n isTitle = dom.find('title')\n if isTitle is None:\n title_arr.append(dom.find('head'))\n return title_arr\n\n def rule9(self, dom):\n link_arr = []\n url_tags = ['http', 'https', '://www.', 'www']\n for link in dom.find_all('a'):\n if not ('http' in link or 'https' in link or '://www.' 
in link or\n 'www' in link):\n link_arr.append(link)\n return link_arr\n <mask token>\n\n def _img_without_alt(self, tag):\n return tag.name == 'img' and not tag.has_attr('alt')\n\n def _tbl_without_summ(self, tag):\n return tag.name == 'table' and not tag.has_attr('summary')\n",
"step-4": "<mask token>\n\n\nclass Rules:\n\n def __init__(self):\n self.ruleCollection = {'1': self.rule1, '2': self.rule2, '3': self.\n rule3, '4': self.rule4, '5': self.rule5, '6': self.rule6, '7':\n self.rule7, '8': self.rule8, '9': self.rule9, '10': self.rule10}\n\n def getRule(self, id):\n return self.ruleCollection[id]\n\n def rule1(self, dom):\n return dom.find_all(self._img_without_alt)\n\n def rule2(self, dom):\n video_arr = []\n for embed in dom.find_all('embed'):\n if not embed.noembed:\n video_arr.append(embed)\n return video_arr\n\n def rule3(self, dom):\n clrcue_arr = []\n for fnt in dom.find_all('font'):\n if fnt.has_attr('color'):\n clrcue_arr.append(fnt)\n for spn in dom.find_all('span'):\n if spn.has_attr('style'):\n clrcue_arr.append(spn)\n return clrcue_arr\n <mask token>\n\n def rule5(self, dom):\n twcap_arr = []\n for tb in dom.find_all('table'):\n if not tb.caption:\n twcap_arr.append(tb)\n return twcap_arr\n\n def rule6(self, dom):\n lbl_arr = []\n inputElems = []\n inputElems.extend(dom.find_all(['textarea', 'select']))\n inputElems.extend(dom.find_all(type=['text', 'password', 'checkbox',\n 'radio', 'file']))\n labels = dom.find_all('label')\n for input in inputElems:\n hasLabel = False\n if input.has_attr('id'):\n id = input['id']\n for lbl in labels:\n if lbl.has_attr('for') and lbl['for'] == id:\n hasLabel = True\n break\n if not hasLabel:\n lbl_arr.append(input)\n return lbl_arr\n\n def rule7(self, dom):\n dblclk_arr = []\n dblclk_arr = dom.find_all(ondblclick=True, onkeypress=False)\n return dblclk_arr\n\n def rule8(self, dom):\n title_arr = []\n isTitle = dom.find('title')\n if isTitle is None:\n title_arr.append(dom.find('head'))\n return title_arr\n\n def rule9(self, dom):\n link_arr = []\n url_tags = ['http', 'https', '://www.', 'www']\n for link in dom.find_all('a'):\n if not ('http' in link or 'https' in link or '://www.' 
in link or\n 'www' in link):\n link_arr.append(link)\n return link_arr\n\n def rule10(self, dom):\n tab_arr = []\n for tab in dom.find_all('a', 'input', ondblclick=True, onkeydown=\n True, onkeypress=True):\n if not tab.has_attr('tabindex'):\n tab_arr.append(tab)\n return tab_arr\n\n def _img_without_alt(self, tag):\n return tag.name == 'img' and not tag.has_attr('alt')\n\n def _tbl_without_summ(self, tag):\n return tag.name == 'table' and not tag.has_attr('summary')\n",
"step-5": "from bs4 import BeautifulSoup\nimport re\n\nclass Rules:\n def __init__(self):\n self.ruleCollection = {\n \"1\" : self.rule1,\n \"2\" : self.rule2,\n \"3\" : self.rule3,\n \"4\" : self.rule4,\n \"5\" : self.rule5,\n \"6\" : self.rule6,\n \"7\" : self.rule7,\n \"8\" : self.rule8,\n \"9\" : self.rule9,\n \"10\" : self.rule10,\n }\n \n def getRule(self, id):\n return self.ruleCollection[id]\n # Image without text alternatives\n def rule1(self,dom):\n return dom.find_all(self._img_without_alt)\n \n # Embeded multimedia without noembed (text or audio)\n def rule2(self,dom):\n video_arr = []\n for embed in dom.find_all(\"embed\"):\n if not embed.noembed:\n video_arr.append(embed)\n return video_arr\n \n #color cues\n #without the definitions in css\n #This rule needs to be improved\n def rule3(self,dom):\n clrcue_arr = []\n for fnt in dom.find_all('font'):\n if fnt.has_attr('color'):\n clrcue_arr.append(fnt)\n for spn in dom.find_all('span'):\n if spn.has_attr('style'):\n clrcue_arr.append(spn)\n return clrcue_arr\n \n #Table without summary\n def rule4(self,dom):\n return dom.find_all(self._tbl_without_summ)\n \n \n #Table without caption\n def rule5(self,dom):\n twcap_arr = [];\n for tb in dom.find_all(\"table\"):\n if not tb.caption:\n twcap_arr.append(tb)\n return twcap_arr\n \n def rule6(self,dom):\n lbl_arr = [];\n inputElems =[]\n inputElems.extend(dom.find_all([\"textarea\", \"select\"]))\n inputElems.extend(dom.find_all(type=[\"text\",\"password\", \"checkbox\", \"radio\", \"file\"]))\n labels = dom.find_all('label')\n for input in inputElems:\n hasLabel = False\n if input.has_attr('id'):\n id = input['id']\n \n for lbl in labels:\n if lbl.has_attr(\"for\") and lbl['for'] == id:\n hasLabel = True\n break\n \n if not hasLabel:\n lbl_arr.append(input)\n\n return lbl_arr\n \n def rule7(self,dom):\n dblclk_arr = []\n dblclk_arr = dom.find_all(ondblclick = True, onkeypress = False)\n return dblclk_arr\n \n def rule8(self,dom):\n title_arr = []\n isTitle = 
dom.find('title')\n if isTitle is None:\n title_arr.append(dom.find('head'))\n return title_arr\n \n def rule9(self,dom):\n link_arr = []\n url_tags = ['http', 'https', '://www.' , 'www' ]\n for link in dom.find_all('a'):\n if not ('http' in link or 'https' in link or '://www.' in link or 'www' in link):\n link_arr.append(link)\n \n return link_arr\n \n def rule10(self,dom):\n tab_arr = []\n for tab in dom.find_all('a', 'input', ondblclick = True, onkeydown = True, onkeypress = True):\n if not tab.has_attr('tabindex'):\n tab_arr.append(tab)\n \n return tab_arr \n \n def _img_without_alt(self,tag):\n return tag.name == \"img\" and not tag.has_attr(\"alt\")\n \n def _tbl_without_summ(self,tag):\n return tag.name == \"table\" and not tag.has_attr(\"summary\")\n \n#for testing\n\n\n\n",
"step-ids": [
8,
11,
13,
14,
17
]
}
|
[
8,
11,
13,
14,
17
] |
import numpy as np
class Adaline:
    """ADALINE linear neuron trained with batch gradient descent.

    Attributes:
        eta: learning rate used for each weight update.
        n_iter: number of gradient-descent epochs run by fit().
        error: sum-of-squared-error cost recorded after each epoch
            (the list accumulates across repeated fit() calls).
    """

    def __init__(self, eta=0.0001, n_iter=2000):
        self.eta = eta
        self.n_iter = n_iter
        self.error = []

    def _with_bias(self, X):
        """Prepend a column of ones so the first weight acts as the bias."""
        ones = np.ones((X.shape[0], 1))
        return np.hstack((ones, X))

    def fit(self, X, Y):
        """Learn weights mapping X to Y; returns self for chaining.

        Weights are initialized uniformly in [-1, 1); each epoch performs
        one full-batch gradient step and records the cost 0.5 * SSE.
        """
        Xb = self._with_bias(X)
        self.w = np.random.uniform(-1, 1, (Xb.shape[1], 1))
        for _ in range(self.n_iter):
            residual = Y - Xb.dot(self.w)
            self.w += self.eta * Xb.T.dot(residual)
            self.error.append(0.5 * np.sum(residual ** 2))
        return self

    def predict(self, X):
        """Return the linear activation (no threshold) for each row of X."""
        return self._with_bias(X).dot(self.w)
|
normal
|
{
"blob_id": "02e711dfc122007c74949cd9f86e2aeb9d334871",
"index": 329,
"step-1": "<mask token>\n\n\nclass Adaline:\n <mask token>\n\n def fit(self, X, Y):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n self.w = np.random.uniform(-1, 1, (X.shape[1], 1))\n for n in range(self.n_iter):\n y = X.dot(self.w)\n error = Y - y\n self.w += self.eta * X.T.dot(error)\n cost = 1.0 / 2 * np.sum(error ** 2)\n self.error.append(cost)\n return self\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Adaline:\n\n def __init__(self, eta=0.0001, n_iter=2000):\n self.eta = eta\n self.n_iter = n_iter\n self.error = []\n\n def fit(self, X, Y):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n self.w = np.random.uniform(-1, 1, (X.shape[1], 1))\n for n in range(self.n_iter):\n y = X.dot(self.w)\n error = Y - y\n self.w += self.eta * X.T.dot(error)\n cost = 1.0 / 2 * np.sum(error ** 2)\n self.error.append(cost)\n return self\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Adaline:\n\n def __init__(self, eta=0.0001, n_iter=2000):\n self.eta = eta\n self.n_iter = n_iter\n self.error = []\n\n def fit(self, X, Y):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n self.w = np.random.uniform(-1, 1, (X.shape[1], 1))\n for n in range(self.n_iter):\n y = X.dot(self.w)\n error = Y - y\n self.w += self.eta * X.T.dot(error)\n cost = 1.0 / 2 * np.sum(error ** 2)\n self.error.append(cost)\n return self\n\n def predict(self, X):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n Y_hat = X.dot(self.w)\n return Y_hat\n",
"step-4": "import numpy as np\n\n\nclass Adaline:\n\n def __init__(self, eta=0.0001, n_iter=2000):\n self.eta = eta\n self.n_iter = n_iter\n self.error = []\n\n def fit(self, X, Y):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n self.w = np.random.uniform(-1, 1, (X.shape[1], 1))\n for n in range(self.n_iter):\n y = X.dot(self.w)\n error = Y - y\n self.w += self.eta * X.T.dot(error)\n cost = 1.0 / 2 * np.sum(error ** 2)\n self.error.append(cost)\n return self\n\n def predict(self, X):\n X = np.hstack((np.ones((X.shape[0], 1)), X))\n Y_hat = X.dot(self.w)\n return Y_hat\n",
"step-5": "import numpy as np\n\n\nclass Adaline:\n def __init__(self, eta = 0.0001, n_iter = 2000):\n self.eta = eta\n self.n_iter = n_iter\n self.error = []\n\n def fit(self, X, Y):\n X = np.hstack((np.ones((X.shape[0],1)), X))\n self.w = np.random.uniform(-1, 1, (X.shape[1], 1))\n for n in range(self.n_iter):\n y = X.dot(self.w)\n error = Y - y\n self.w += self.eta * X.T.dot(error)\n cost = 1./2 * np.sum(error**2)\n self.error.append(cost)\n return self\n\n def predict(self, X):\n X = np.hstack((np.ones((X.shape[0],1)), X))\n Y_hat = X.dot(self.w)\n return Y_hat\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
def get_attachment_station_coords(station):
    """Return the [x, y, z] gantry coordinates of an attachment station.

    Unknown station names map to the origin [0.0, 0.0, 0.0].
    """
    coords_by_station = {
        "gripper1": [0.48, 0.05, 0.161],
        "gripper2": [0.28, 0.05, 0.13],
        "syringe": [0.405, 0.745, 0.213],
    }
    # Move the gantry to the coordinates
    return coords_by_station.get(station, [0.0, 0.0, 0.0])
def get_station_coords(station):
    """Return the [x, y, z] gantry coordinates of a cooking station.

    Unknown station names map to the origin [0.0, 0.0, 0.0].
    """
    coords_by_station = {
        "steaks": [0.4625, 0.375, 0.14],
        "griddle": [0.73, 0.375, 0.05],
        "steak_flip_initial": [0.73, 0.7, 0.166],
        "steak_flip_done": [0.73, 0.33, 0.166],
        "steak_flip_drop": [0.73, 0.6, 0.05],
        # "plate" was previously [1.11, 0.75, 0.1]
        "plate": [1.11, 0.35, 0.1],
        "oil": [0.9, 0.375, 0.08],
    }
    return coords_by_station.get(station, [0.0, 0.0, 0.0])
|
normal
|
{
"blob_id": "86c03fa85ac405a148be13325efeaaf691d9ec26",
"index": 5223,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_station_coords(station):\n if station == 'steaks':\n coords = [0.4625, 0.375, 0.14]\n elif station == 'griddle':\n coords = [0.73, 0.375, 0.05]\n elif station == 'steak_flip_initial':\n coords = [0.73, 0.7, 0.166]\n elif station == 'steak_flip_done':\n coords = [0.73, 0.33, 0.166]\n elif station == 'steak_flip_drop':\n coords = [0.73, 0.6, 0.05]\n elif station == 'plate':\n coords = [1.11, 0.35, 0.1]\n elif station == 'oil':\n coords = [0.9, 0.375, 0.08]\n else:\n coords = [0.0, 0.0, 0.0]\n return coords\n",
"step-3": "def get_attachment_station_coords(station):\n if station == 'gripper1':\n coords = [0.48, 0.05, 0.161]\n elif station == 'gripper2':\n coords = [0.28, 0.05, 0.13]\n elif station == 'syringe':\n coords = [0.405, 0.745, 0.213]\n else:\n coords = [0.0, 0.0, 0.0]\n return coords\n\n\ndef get_station_coords(station):\n if station == 'steaks':\n coords = [0.4625, 0.375, 0.14]\n elif station == 'griddle':\n coords = [0.73, 0.375, 0.05]\n elif station == 'steak_flip_initial':\n coords = [0.73, 0.7, 0.166]\n elif station == 'steak_flip_done':\n coords = [0.73, 0.33, 0.166]\n elif station == 'steak_flip_drop':\n coords = [0.73, 0.6, 0.05]\n elif station == 'plate':\n coords = [1.11, 0.35, 0.1]\n elif station == 'oil':\n coords = [0.9, 0.375, 0.08]\n else:\n coords = [0.0, 0.0, 0.0]\n return coords\n",
"step-4": "#!/usr/bin/env python\n\ndef get_attachment_station_coords(station):\n if (station == \"gripper1\"):\n coords = [0.48, 0.05, 0.161]\n elif (station == \"gripper2\"):\n coords = [0.28, 0.05, 0.13]\n elif (station == \"syringe\"):\n coords = [0.405, 0.745, 0.213]\n else:\n coords = [0.0, 0.0, 0.0]\n # Move the gantry to the coordinates\n return coords\n\ndef get_station_coords(station):\n if(station == \"steaks\"):\n coords= [0.4625, 0.375, 0.14]\n elif(station == \"griddle\"):\n coords = [0.73, 0.375, 0.05]\n elif(station == \"steak_flip_initial\"):\n coords = [0.73, 0.7, 0.166]\n elif(station == \"steak_flip_done\"):\n coords = [0.73, 0.33, 0.166]\n elif(station == \"steak_flip_drop\"):\n coords = [0.73, 0.6, 0.05]\n elif(station == \"plate\"):\n # coords = [1.11, 0.75, 0.1]\n coords = [1.11, 0.35, 0.1]\n elif(station == \"oil\"):\n coords = [0.9, 0.375, 0.08]\n else:\n coords = [0.0, 0.0, 0.0]\n return coords",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
Conditional-statement example
fdragon50
2016
'''

# Comments: help text / remarks / a record of unused (possibly reusable) code.
# Multi-line strings can be written between triple quotes, but using them
# as comments is discouraged.
# Anything starting with # is not string data; it is an ignored region.
# Write comments so anyone can understand them, and keep them concise.
# Better still is code that anyone can understand on its own --
# aim for readable code.

# Conditional-statement example
# fdragon50
# 2016

# Renamed from 'input' to avoid shadowing the builtin input().
user_input = 11
real_fdragon50 = 11
# real_k8805 = "ab"

if real_fdragon50 == user_input:
    print("Hello!")
# elif real_k8805 == user_input:
#     print("Hello!")
else:
    print("Who are you")
|
normal
|
{
"blob_id": "2da6debb1f9ae2c966a17fdfb3b668160a3ef8d7",
"index": 1384,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif real_fdragon50 == input:\n print('Hello!')\nelse:\n print('Who are you')\n",
"step-3": "<mask token>\ninput = 11\nreal_fdragon50 = 11\nif real_fdragon50 == input:\n print('Hello!')\nelse:\n print('Who are you')\n",
"step-4": "'''\n#조건문 예제\n#fdragon50\n#2016\n'''\n\n\n# 주석 : 도움말/덧글 / 미사용(추후 사용가능한) 코드 기록\n# 여러줄의 문자열 표현은 ''' ''' 사이에 표현 가능하나 사용은 권장않음\n# #으로 시작하는것은 문자열 자체가 아닌.. 무시되는 구간\n# 주석은 누가봐도 이해할수있게 / 간결하게\n# 더 좋은것은 누가봐도 이해할수 있는 코드임\n# 가독성이 좋은 코드를 만들수 있도록..\n\n#조건문 예제\n#fdragon50\n#2016\n\n\ninput = 11\nreal_fdragon50 = 11\n#real_k8805 = \"ab\"\n\nif real_fdragon50 == input:\n print(\"Hello!\")\n#elif real_k8805 == input:\n# print(\"Hello!\")\nelse:\n print(\"Who are you\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def merge_bitplane_to_image(bitplane, arr, color):
arr = bp.to_image(arr)
img = np.zeros(arr.shape)
img[:, :, color] = bitplane
return img
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv) < 4:
print('USAGE: {0} <PATH> <COLOR> <BIT>'.format(sys.argv[0]))
print(' PATH: image path')
print(' COLOR: GRAY=-1, RED=0, GREEN=1, BLUE=2')
print(' BIT : 0~7 (0:MSB, 7:LSB)')
exit(1)
<|reserved_special_token_0|>
def merge_bitplane_to_image(bitplane, arr, color):
arr = bp.to_image(arr)
img = np.zeros(arr.shape)
img[:, :, color] = bitplane
return img
<|reserved_special_token_0|>
if len(arr.shape) < 2 or len(arr.shape) > 3:
print('Unsupported shape of image')
exit(1)
<|reserved_special_token_0|>
if COLOR != -1 and len(arr.shape) == 4:
arr = merge_bitplane_to_image(bitplane, arr, COLOR)
else:
arr = bitplane
Image.fromarray(np.uint8(arr)).show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv) < 4:
print('USAGE: {0} <PATH> <COLOR> <BIT>'.format(sys.argv[0]))
print(' PATH: image path')
print(' COLOR: GRAY=-1, RED=0, GREEN=1, BLUE=2')
print(' BIT : 0~7 (0:MSB, 7:LSB)')
exit(1)
PATH = sys.argv[1]
COLOR = int(sys.argv[2])
BIT = int(sys.argv[3])
def merge_bitplane_to_image(bitplane, arr, color):
arr = bp.to_image(arr)
img = np.zeros(arr.shape)
img[:, :, color] = bitplane
return img
arr = bp.read_image_as_numpy(PATH)
if len(arr.shape) < 2 or len(arr.shape) > 3:
print('Unsupported shape of image')
exit(1)
arr = bp.to_binary(arr)
bitplane = bp.extract_bitplane(arr, COLOR, BIT)
bitplane[bitplane > 0] = 255
if COLOR != -1 and len(arr.shape) == 4:
arr = merge_bitplane_to_image(bitplane, arr, COLOR)
else:
arr = bitplane
Image.fromarray(np.uint8(arr)).show()
<|reserved_special_token_1|>
import sys
import numpy as np
import bpcs as bp
from PIL import Image
if len(sys.argv) < 4:
print('USAGE: {0} <PATH> <COLOR> <BIT>'.format(sys.argv[0]))
print(' PATH: image path')
print(' COLOR: GRAY=-1, RED=0, GREEN=1, BLUE=2')
print(' BIT : 0~7 (0:MSB, 7:LSB)')
exit(1)
PATH = sys.argv[1]
COLOR = int(sys.argv[2])
BIT = int(sys.argv[3])
def merge_bitplane_to_image(bitplane, arr, color):
arr = bp.to_image(arr)
img = np.zeros(arr.shape)
img[:, :, color] = bitplane
return img
arr = bp.read_image_as_numpy(PATH)
if len(arr.shape) < 2 or len(arr.shape) > 3:
print('Unsupported shape of image')
exit(1)
arr = bp.to_binary(arr)
bitplane = bp.extract_bitplane(arr, COLOR, BIT)
bitplane[bitplane > 0] = 255
if COLOR != -1 and len(arr.shape) == 4:
arr = merge_bitplane_to_image(bitplane, arr, COLOR)
else:
arr = bitplane
Image.fromarray(np.uint8(arr)).show()
<|reserved_special_token_1|>
# Command-line tool: extract one bit plane from an image and display it.
import sys
import numpy as np
import bpcs as bp

from PIL import Image

# Require three positional arguments; otherwise print usage and exit.
if len(sys.argv)<4:
    print("USAGE: {0} <PATH> <COLOR> <BIT>".format(sys.argv[0]))
    print(" PATH: image path")
    print(" COLOR: GRAY=-1, RED=0, GREEN=1, BLUE=2")
    print(" BIT : 0~7 (0:MSB, 7:LSB)")
    exit(1)

PATH = sys.argv[1]        # image file to read
COLOR = int(sys.argv[2])  # channel index: -1 gray, 0 red, 1 green, 2 blue
BIT = int(sys.argv[3])    # bit-plane index: 0 = MSB ... 7 = LSB

def merge_bitplane_to_image(bitplane, arr, color):
    """Place *bitplane* into channel *color* of an otherwise black image.

    ``arr`` is the binary representation; ``bp.to_image`` converts it back
    to pixel shape (per the shape comments below: (h, w, 3) or (h, w)), so
    the output image matches the source image's dimensions.
    """
    arr = bp.to_image(arr)
    img = np.zeros(arr.shape)
    img[:,:,color] = bitplane
    return img

arr = bp.read_image_as_numpy(PATH)
# Only 2-D (grayscale) or 3-D (color) pixel arrays are supported.
if len(arr.shape)<2 or len(arr.shape)>3:
    print("Unsupported shape of image")
    exit(1)
arr = bp.to_binary(arr) # arr.shape = (h, w, 3(color), 8(byte)) or (h, w, 8(byte))
# arr = bp.to_image(arr) # arr.shape = (h, w, 3) or (h, w)
bitplane = bp.extract_bitplane(arr, COLOR, BIT)
# Stretch the 0/1 plane to 0/255 so it is visible as black and white.
bitplane[bitplane>0] = 255
if COLOR!=-1 and len(arr.shape)==4:
    # Color image: show the plane inside its own color channel.
    arr = merge_bitplane_to_image(bitplane, arr, COLOR)
else:
    arr = bitplane
Image.fromarray(np.uint8(arr)).show() # show image
# Image.fromarray(np.uint8(arr)).save("test.png") # save image
|
flexible
|
{
"blob_id": "95ea811d38c314f5f19294500e16bae3d00d4fff",
"index": 1328,
"step-1": "<mask token>\n\n\ndef merge_bitplane_to_image(bitplane, arr, color):\n arr = bp.to_image(arr)\n img = np.zeros(arr.shape)\n img[:, :, color] = bitplane\n return img\n\n\n<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) < 4:\n print('USAGE: {0} <PATH> <COLOR> <BIT>'.format(sys.argv[0]))\n print(' PATH: image path')\n print(' COLOR: GRAY=-1, RED=0, GREEN=1, BLUE=2')\n print(' BIT : 0~7 (0:MSB, 7:LSB)')\n exit(1)\n<mask token>\n\n\ndef merge_bitplane_to_image(bitplane, arr, color):\n arr = bp.to_image(arr)\n img = np.zeros(arr.shape)\n img[:, :, color] = bitplane\n return img\n\n\n<mask token>\nif len(arr.shape) < 2 or len(arr.shape) > 3:\n print('Unsupported shape of image')\n exit(1)\n<mask token>\nif COLOR != -1 and len(arr.shape) == 4:\n arr = merge_bitplane_to_image(bitplane, arr, COLOR)\nelse:\n arr = bitplane\nImage.fromarray(np.uint8(arr)).show()\n",
"step-3": "<mask token>\nif len(sys.argv) < 4:\n print('USAGE: {0} <PATH> <COLOR> <BIT>'.format(sys.argv[0]))\n print(' PATH: image path')\n print(' COLOR: GRAY=-1, RED=0, GREEN=1, BLUE=2')\n print(' BIT : 0~7 (0:MSB, 7:LSB)')\n exit(1)\nPATH = sys.argv[1]\nCOLOR = int(sys.argv[2])\nBIT = int(sys.argv[3])\n\n\ndef merge_bitplane_to_image(bitplane, arr, color):\n arr = bp.to_image(arr)\n img = np.zeros(arr.shape)\n img[:, :, color] = bitplane\n return img\n\n\narr = bp.read_image_as_numpy(PATH)\nif len(arr.shape) < 2 or len(arr.shape) > 3:\n print('Unsupported shape of image')\n exit(1)\narr = bp.to_binary(arr)\nbitplane = bp.extract_bitplane(arr, COLOR, BIT)\nbitplane[bitplane > 0] = 255\nif COLOR != -1 and len(arr.shape) == 4:\n arr = merge_bitplane_to_image(bitplane, arr, COLOR)\nelse:\n arr = bitplane\nImage.fromarray(np.uint8(arr)).show()\n",
"step-4": "import sys\nimport numpy as np\nimport bpcs as bp\nfrom PIL import Image\nif len(sys.argv) < 4:\n print('USAGE: {0} <PATH> <COLOR> <BIT>'.format(sys.argv[0]))\n print(' PATH: image path')\n print(' COLOR: GRAY=-1, RED=0, GREEN=1, BLUE=2')\n print(' BIT : 0~7 (0:MSB, 7:LSB)')\n exit(1)\nPATH = sys.argv[1]\nCOLOR = int(sys.argv[2])\nBIT = int(sys.argv[3])\n\n\ndef merge_bitplane_to_image(bitplane, arr, color):\n arr = bp.to_image(arr)\n img = np.zeros(arr.shape)\n img[:, :, color] = bitplane\n return img\n\n\narr = bp.read_image_as_numpy(PATH)\nif len(arr.shape) < 2 or len(arr.shape) > 3:\n print('Unsupported shape of image')\n exit(1)\narr = bp.to_binary(arr)\nbitplane = bp.extract_bitplane(arr, COLOR, BIT)\nbitplane[bitplane > 0] = 255\nif COLOR != -1 and len(arr.shape) == 4:\n arr = merge_bitplane_to_image(bitplane, arr, COLOR)\nelse:\n arr = bitplane\nImage.fromarray(np.uint8(arr)).show()\n",
"step-5": "import sys\nimport numpy as np\nimport bpcs as bp\n\nfrom PIL import Image\n\nif len(sys.argv)<4:\n print(\"USAGE: {0} <PATH> <COLOR> <BIT>\".format(sys.argv[0]))\n print(\" PATH: image path\")\n print(\" COLOR: GRAY=-1, RED=0, GREEN=1, BLUE=2\")\n print(\" BIT : 0~7 (0:MSB, 7:LSB)\")\n exit(1)\n \nPATH = sys.argv[1]\nCOLOR = int(sys.argv[2])\nBIT = int(sys.argv[3])\n\ndef merge_bitplane_to_image(bitplane, arr, color):\n arr = bp.to_image(arr)\n img = np.zeros(arr.shape)\n img[:,:,color] = bitplane\n return img\n\narr = bp.read_image_as_numpy(PATH)\nif len(arr.shape)<2 or len(arr.shape)>3:\n print(\"Unsupported shape of image\")\n exit(1)\narr = bp.to_binary(arr) # arr.shape = (h, w, 3(color), 8(byte)) or (h, w, 8(byte))\n# arr = bp.to_image(arr) # arr.shape = (h, w, 3) or (h, w)\nbitplane = bp.extract_bitplane(arr, COLOR, BIT)\nbitplane[bitplane>0] = 255\nif COLOR!=-1 and len(arr.shape)==4:\n arr = merge_bitplane_to_image(bitplane, arr, COLOR)\nelse:\n arr = bitplane\nImage.fromarray(np.uint8(arr)).show() # show image\n# Image.fromarray(np.uint8(arr)).save(\"test.png\") # save image\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Elasticsearch(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_not_created_repo
test_not_detected_repo
test_missing_repo_name
test_no_repo_dir
test_no_repo_name
test_default
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.host_list = ['host1', 'host2']
self.repo = 'reponame'
self.repo2 = 'reponame2'
self.repo3 = 'reponame3'
self.els = Elasticsearch(self.host_list)
self.repo_dir = '/dir/path/dump2'
self.nodes_data = {'serverid1': {'name': 'hostname1', 'settings': {
'path': {'data': ['/dir/data1'], 'logs': ['/dir/logs1']}}},
'serverid2': {'name': 'hostname2', 'settings': {'path': {'data':
['/dir/data2'], 'logs': ['/dir/logs2']}}}}
self.health_data = {'status': 'green', 'cluster_name': 'ClusterName'}
self.dump = '/dir/path/dump'
self.repo_list = {'reponame': {'type': 'dbdump', 'settings': {
'location': self.dump}}}
self.repo_dict = {'reponame': {'type': 'dbdump', 'settings': {
'location': self.dump}}}
self.repo_dict2 = {'reponame': {'type': 'dbdump', 'settings': {
'location': self.dump}}, 'reponame2': {'type': 'dbdump',
'settings': {'location': '/dir/path/dump2'}}}
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': False}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_not_created_repo(self, mock_es, mock_repo):
"""Function: test_not_created_repo
Description: Test with repository not created.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
els.repo_name = None
self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,
'ERROR: Repository creation failure: ' +
' reponame3, /dir/path/dump2'))
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_not_detected_repo(self, mock_es, mock_repo):
"""Function: test_not_detected_repo
Description: Test with repository not detected.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
els.repo_name = None
self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,
'ERROR: Repository not detected: reponame3, /dir/path/dump2'))
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': False}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_missing_repo_name(self, mock_es, mock_repo):
"""Function: test_missing_repo_name
Description: Test with missing repo named.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
els.repo = None
self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (True,
'ERROR: Missing repo name or' +
" directory: 'None', '/dir/path/dump2'"))
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_no_repo_dir(self, mock_es, mock_repo):
"""Function: test_no_repo_dir
Description: Test with no repo directory passed.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(self.repo), (False, None))
self.assertEqual(els.repo_dict, self.repo_dict2)
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_no_repo_name(self, mock_es, mock_repo):
"""Function: test_no_repo_name
Description: Test with no repo named passed.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo2, repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (False, None)
)
self.assertEqual(els.repo_dict, self.repo_dict2)
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_default(self, mock_es, mock_repo):
"""Function: test_default
Description: Test with default settings.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(self.repo2, self.repo_dir), (False,
None))
self.assertEqual(els.repo_dict, self.repo_dict2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Elasticsearch(object):
"""Class: ElasticSearch
Description: Class representation of the Elasticsearch class.
Methods:
__init__
"""
def __init__(self, host_list, port=9200):
"""Method: __init__
Description: Initialization instance of the class.
Arguments:
"""
self.hosts = host_list
self.port = port
self.info_status = {'cluster_name': 'ClusterName', 'name': 'servername'
}
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_not_created_repo
test_not_detected_repo
test_missing_repo_name
test_no_repo_dir
test_no_repo_name
test_default
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.host_list = ['host1', 'host2']
self.repo = 'reponame'
self.repo2 = 'reponame2'
self.repo3 = 'reponame3'
self.els = Elasticsearch(self.host_list)
self.repo_dir = '/dir/path/dump2'
self.nodes_data = {'serverid1': {'name': 'hostname1', 'settings': {
'path': {'data': ['/dir/data1'], 'logs': ['/dir/logs1']}}},
'serverid2': {'name': 'hostname2', 'settings': {'path': {'data':
['/dir/data2'], 'logs': ['/dir/logs2']}}}}
self.health_data = {'status': 'green', 'cluster_name': 'ClusterName'}
self.dump = '/dir/path/dump'
self.repo_list = {'reponame': {'type': 'dbdump', 'settings': {
'location': self.dump}}}
self.repo_dict = {'reponame': {'type': 'dbdump', 'settings': {
'location': self.dump}}}
self.repo_dict2 = {'reponame': {'type': 'dbdump', 'settings': {
'location': self.dump}}, 'reponame2': {'type': 'dbdump',
'settings': {'location': '/dir/path/dump2'}}}
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': False}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_not_created_repo(self, mock_es, mock_repo):
"""Function: test_not_created_repo
Description: Test with repository not created.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
els.repo_name = None
self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,
'ERROR: Repository creation failure: ' +
' reponame3, /dir/path/dump2'))
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_not_detected_repo(self, mock_es, mock_repo):
"""Function: test_not_detected_repo
Description: Test with repository not detected.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
els.repo_name = None
self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,
'ERROR: Repository not detected: reponame3, /dir/path/dump2'))
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': False}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_missing_repo_name(self, mock_es, mock_repo):
"""Function: test_missing_repo_name
Description: Test with missing repo named.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
els.repo = None
self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (True,
'ERROR: Missing repo name or' +
" directory: 'None', '/dir/path/dump2'"))
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_no_repo_dir(self, mock_es, mock_repo):
"""Function: test_no_repo_dir
Description: Test with no repo directory passed.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(self.repo), (False, None))
self.assertEqual(els.repo_dict, self.repo_dict2)
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_no_repo_name(self, mock_es, mock_repo):
"""Function: test_no_repo_name
Description: Test with no repo named passed.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo2, repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (False, None)
)
self.assertEqual(els.repo_dict, self.repo_dict2)
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_default(self, mock_es, mock_repo):
"""Function: test_default
Description: Test with default settings.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(self.repo2, self.repo_dir), (False,
None))
self.assertEqual(els.repo_dict, self.repo_dict2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
<|reserved_special_token_0|>
sys.path.append(os.getcwd())
<|reserved_special_token_0|>
class Elasticsearch(object):
"""Class: ElasticSearch
Description: Class representation of the Elasticsearch class.
Methods:
__init__
"""
def __init__(self, host_list, port=9200):
"""Method: __init__
Description: Initialization instance of the class.
Arguments:
"""
self.hosts = host_list
self.port = port
self.info_status = {'cluster_name': 'ClusterName', 'name': 'servername'
}
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_not_created_repo
test_not_detected_repo
test_missing_repo_name
test_no_repo_dir
test_no_repo_name
test_default
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.host_list = ['host1', 'host2']
self.repo = 'reponame'
self.repo2 = 'reponame2'
self.repo3 = 'reponame3'
self.els = Elasticsearch(self.host_list)
self.repo_dir = '/dir/path/dump2'
self.nodes_data = {'serverid1': {'name': 'hostname1', 'settings': {
'path': {'data': ['/dir/data1'], 'logs': ['/dir/logs1']}}},
'serverid2': {'name': 'hostname2', 'settings': {'path': {'data':
['/dir/data2'], 'logs': ['/dir/logs2']}}}}
self.health_data = {'status': 'green', 'cluster_name': 'ClusterName'}
self.dump = '/dir/path/dump'
self.repo_list = {'reponame': {'type': 'dbdump', 'settings': {
'location': self.dump}}}
self.repo_dict = {'reponame': {'type': 'dbdump', 'settings': {
'location': self.dump}}}
self.repo_dict2 = {'reponame': {'type': 'dbdump', 'settings': {
'location': self.dump}}, 'reponame2': {'type': 'dbdump',
'settings': {'location': '/dir/path/dump2'}}}
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': False}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_not_created_repo(self, mock_es, mock_repo):
"""Function: test_not_created_repo
Description: Test with repository not created.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
els.repo_name = None
self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,
'ERROR: Repository creation failure: ' +
' reponame3, /dir/path/dump2'))
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_not_detected_repo(self, mock_es, mock_repo):
"""Function: test_not_detected_repo
Description: Test with repository not detected.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
els.repo_name = None
self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,
'ERROR: Repository not detected: reponame3, /dir/path/dump2'))
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': False}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_missing_repo_name(self, mock_es, mock_repo):
"""Function: test_missing_repo_name
Description: Test with missing repo named.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
els.repo = None
self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (True,
'ERROR: Missing repo name or' +
" directory: 'None', '/dir/path/dump2'"))
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_no_repo_dir(self, mock_es, mock_repo):
"""Function: test_no_repo_dir
Description: Test with no repo directory passed.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(self.repo), (False, None))
self.assertEqual(els.repo_dict, self.repo_dict2)
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_no_repo_name(self, mock_es, mock_repo):
"""Function: test_no_repo_name
Description: Test with no repo named passed.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo2, repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (False, None)
)
self.assertEqual(els.repo_dict, self.repo_dict2)
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_default(self, mock_es, mock_repo):
"""Function: test_default
Description: Test with default settings.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(self.repo2, self.repo_dir), (False,
None))
self.assertEqual(els.repo_dict, self.repo_dict2)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
<|reserved_special_token_0|>
sys.path.append(os.getcwd())
<|reserved_special_token_0|>
__version__ = version.__version__
class Elasticsearch(object):
"""Class: ElasticSearch
Description: Class representation of the Elasticsearch class.
Methods:
__init__
"""
def __init__(self, host_list, port=9200):
"""Method: __init__
Description: Initialization instance of the class.
Arguments:
"""
self.hosts = host_list
self.port = port
self.info_status = {'cluster_name': 'ClusterName', 'name': 'servername'
}
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_not_created_repo
test_not_detected_repo
test_missing_repo_name
test_no_repo_dir
test_no_repo_name
test_default
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.host_list = ['host1', 'host2']
self.repo = 'reponame'
self.repo2 = 'reponame2'
self.repo3 = 'reponame3'
self.els = Elasticsearch(self.host_list)
self.repo_dir = '/dir/path/dump2'
self.nodes_data = {'serverid1': {'name': 'hostname1', 'settings': {
'path': {'data': ['/dir/data1'], 'logs': ['/dir/logs1']}}},
'serverid2': {'name': 'hostname2', 'settings': {'path': {'data':
['/dir/data2'], 'logs': ['/dir/logs2']}}}}
self.health_data = {'status': 'green', 'cluster_name': 'ClusterName'}
self.dump = '/dir/path/dump'
self.repo_list = {'reponame': {'type': 'dbdump', 'settings': {
'location': self.dump}}}
self.repo_dict = {'reponame': {'type': 'dbdump', 'settings': {
'location': self.dump}}}
self.repo_dict2 = {'reponame': {'type': 'dbdump', 'settings': {
'location': self.dump}}, 'reponame2': {'type': 'dbdump',
'settings': {'location': '/dir/path/dump2'}}}
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': False}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_not_created_repo(self, mock_es, mock_repo):
"""Function: test_not_created_repo
Description: Test with repository not created.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
els.repo_name = None
self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,
'ERROR: Repository creation failure: ' +
' reponame3, /dir/path/dump2'))
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_not_detected_repo(self, mock_es, mock_repo):
"""Function: test_not_detected_repo
Description: Test with repository not detected.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
els.repo_name = None
self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,
'ERROR: Repository not detected: reponame3, /dir/path/dump2'))
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': False}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_missing_repo_name(self, mock_es, mock_repo):
"""Function: test_missing_repo_name
Description: Test with missing repo named.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
els.repo = None
self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (True,
'ERROR: Missing repo name or' +
" directory: 'None', '/dir/path/dump2'"))
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_no_repo_dir(self, mock_es, mock_repo):
"""Function: test_no_repo_dir
Description: Test with no repo directory passed.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(self.repo), (False, None))
self.assertEqual(els.repo_dict, self.repo_dict2)
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_no_repo_name(self, mock_es, mock_repo):
"""Function: test_no_repo_name
Description: Test with no repo named passed.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo2, repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (False, None)
)
self.assertEqual(els.repo_dict, self.repo_dict2)
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(
return_value={'acknowledged': True}))
@mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(
return_value=True))
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))
@mock.patch('elastic_class.get_repo_list')
@mock.patch('elastic_class.elasticsearch.Elasticsearch')
def test_default(self, mock_es, mock_repo):
"""Function: test_default
Description: Test with default settings.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.
repo, repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(self.repo2, self.repo_dir), (False,
None))
self.assertEqual(els.repo_dict, self.repo_dict2)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
#!/usr/bin/python
# Classification (U)
"""Program: elasticsearchrepo_create_repo.py
Description: Unit testing of create_repo in
elastic_class.ElasticSearchRepo class.
Usage:
test/unit/elastic_class/elasticsearchrepo_create_repo.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import elastic_class
import version
__version__ = version.__version__
class Elasticsearch(object):
"""Class: ElasticSearch
Description: Class representation of the Elasticsearch class.
Methods:
__init__
"""
def __init__(self, host_list, port=9200):
"""Method: __init__
Description: Initialization instance of the class.
Arguments:
"""
self.hosts = host_list
self.port = port
self.info_status = {"cluster_name": "ClusterName",
"name": "servername"}
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_not_created_repo
test_not_detected_repo
test_missing_repo_name
test_no_repo_dir
test_no_repo_name
test_default
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.host_list = ["host1", "host2"]
self.repo = "reponame"
self.repo2 = "reponame2"
self.repo3 = "reponame3"
self.els = Elasticsearch(self.host_list)
self.repo_dir = "/dir/path/dump2"
self.nodes_data = {"serverid1": {"name": "hostname1", "settings":
{"path": {"data": ["/dir/data1"],
"logs": ["/dir/logs1"]}}},
"serverid2": {"name": "hostname2", "settings":
{"path": {"data": ["/dir/data2"],
"logs": ["/dir/logs2"]}}}}
self.health_data = {"status": "green", "cluster_name": "ClusterName"}
self.dump = "/dir/path/dump"
self.repo_list = {"reponame": {"type": "dbdump", "settings":
{"location": self.dump}}}
self.repo_dict = {"reponame": {"type": "dbdump", "settings":
{"location": self.dump}}}
self.repo_dict2 = {"reponame": {"type": "dbdump", "settings":
{"location": self.dump}},
"reponame2": {"type": "dbdump", "settings":
{"location": "/dir/path/dump2"}}}
@mock.patch("elastic_class.create_snapshot_repo",
mock.Mock(return_value={"acknowledged": False}))
@mock.patch("elastic_class.ElasticSearch.update_status",
mock.Mock(return_value=True))
@mock.patch("elastic_class.is_active", mock.Mock(return_value=True))
@mock.patch("elastic_class.get_repo_list")
@mock.patch("elastic_class.elasticsearch.Elasticsearch")
def test_not_created_repo(self, mock_es, mock_repo):
"""Function: test_not_created_repo
Description: Test with repository not created.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.repo,
repo_dir=self.repo_dir)
els.connect()
els.repo_name = None
self.assertEqual(
els.create_repo(self.repo3, self.repo_dir),
(True,
"ERROR: Repository creation failure: " +
" reponame3, /dir/path/dump2"))
@mock.patch("elastic_class.create_snapshot_repo",
mock.Mock(return_value={"acknowledged": True}))
@mock.patch("elastic_class.ElasticSearch.update_status",
mock.Mock(return_value=True))
@mock.patch("elastic_class.is_active", mock.Mock(return_value=True))
@mock.patch("elastic_class.get_repo_list")
@mock.patch("elastic_class.elasticsearch.Elasticsearch")
def test_not_detected_repo(self, mock_es, mock_repo):
"""Function: test_not_detected_repo
Description: Test with repository not detected.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.repo,
repo_dir=self.repo_dir)
els.connect()
els.repo_name = None
self.assertEqual(
els.create_repo(self.repo3, self.repo_dir),
(True,
"ERROR: Repository not detected: reponame3, /dir/path/dump2"))
@mock.patch("elastic_class.create_snapshot_repo",
mock.Mock(return_value={"acknowledged": False}))
@mock.patch("elastic_class.ElasticSearch.update_status",
mock.Mock(return_value=True))
@mock.patch("elastic_class.is_active", mock.Mock(return_value=True))
@mock.patch("elastic_class.get_repo_list")
@mock.patch("elastic_class.elasticsearch.Elasticsearch")
def test_missing_repo_name(self, mock_es, mock_repo):
"""Function: test_missing_repo_name
Description: Test with missing repo named.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.repo,
repo_dir=self.repo_dir)
els.connect()
els.repo = None
self.assertEqual(
els.create_repo(repo_dir=self.repo_dir),
(True,
"ERROR: Missing repo name or" +
" directory: 'None', '/dir/path/dump2'"))
@mock.patch("elastic_class.create_snapshot_repo",
mock.Mock(return_value={"acknowledged": True}))
@mock.patch("elastic_class.ElasticSearch.update_status",
mock.Mock(return_value=True))
@mock.patch("elastic_class.is_active", mock.Mock(return_value=True))
@mock.patch("elastic_class.get_repo_list")
@mock.patch("elastic_class.elasticsearch.Elasticsearch")
def test_no_repo_dir(self, mock_es, mock_repo):
"""Function: test_no_repo_dir
Description: Test with no repo directory passed.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.repo,
repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(self.repo), (False, None))
self.assertEqual(els.repo_dict, self.repo_dict2)
@mock.patch("elastic_class.create_snapshot_repo",
mock.Mock(return_value={"acknowledged": True}))
@mock.patch("elastic_class.ElasticSearch.update_status",
mock.Mock(return_value=True))
@mock.patch("elastic_class.is_active", mock.Mock(return_value=True))
@mock.patch("elastic_class.get_repo_list")
@mock.patch("elastic_class.elasticsearch.Elasticsearch")
def test_no_repo_name(self, mock_es, mock_repo):
"""Function: test_no_repo_name
Description: Test with no repo named passed.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.repo2,
repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(repo_dir=self.repo_dir),
(False, None))
self.assertEqual(els.repo_dict, self.repo_dict2)
@mock.patch("elastic_class.create_snapshot_repo",
mock.Mock(return_value={"acknowledged": True}))
@mock.patch("elastic_class.ElasticSearch.update_status",
mock.Mock(return_value=True))
@mock.patch("elastic_class.is_active", mock.Mock(return_value=True))
@mock.patch("elastic_class.get_repo_list")
@mock.patch("elastic_class.elasticsearch.Elasticsearch")
def test_default(self, mock_es, mock_repo):
"""Function: test_default
Description: Test with default settings.
Arguments:
"""
mock_es.return_value = self.els
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]
els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.repo,
repo_dir=self.repo_dir)
els.connect()
self.assertEqual(els.create_repo(self.repo2, self.repo_dir),
(False, None))
self.assertEqual(els.repo_dict, self.repo_dict2)
if __name__ == "__main__":
unittest.main()
|
flexible
|
{
"blob_id": "5c01b83634b7ae9bc691341d7432a4e59617444c",
"index": 5182,
"step-1": "<mask token>\n\n\nclass Elasticsearch(object):\n <mask token>\n <mask token>\n\n\nclass UnitTest(unittest.TestCase):\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp\n test_not_created_repo\n test_not_detected_repo\n test_missing_repo_name\n test_no_repo_dir\n test_no_repo_name\n test_default\n\n \"\"\"\n\n def setUp(self):\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n self.host_list = ['host1', 'host2']\n self.repo = 'reponame'\n self.repo2 = 'reponame2'\n self.repo3 = 'reponame3'\n self.els = Elasticsearch(self.host_list)\n self.repo_dir = '/dir/path/dump2'\n self.nodes_data = {'serverid1': {'name': 'hostname1', 'settings': {\n 'path': {'data': ['/dir/data1'], 'logs': ['/dir/logs1']}}},\n 'serverid2': {'name': 'hostname2', 'settings': {'path': {'data':\n ['/dir/data2'], 'logs': ['/dir/logs2']}}}}\n self.health_data = {'status': 'green', 'cluster_name': 'ClusterName'}\n self.dump = '/dir/path/dump'\n self.repo_list = {'reponame': {'type': 'dbdump', 'settings': {\n 'location': self.dump}}}\n self.repo_dict = {'reponame': {'type': 'dbdump', 'settings': {\n 'location': self.dump}}}\n self.repo_dict2 = {'reponame': {'type': 'dbdump', 'settings': {\n 'location': self.dump}}, 'reponame2': {'type': 'dbdump',\n 'settings': {'location': '/dir/path/dump2'}}}\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': False}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_not_created_repo(self, mock_es, mock_repo):\n \"\"\"Function: test_not_created_repo\n\n Description: Test with repository not created.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n 
mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n els.repo_name = None\n self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,\n 'ERROR: Repository creation failure: ' +\n ' reponame3, /dir/path/dump2'))\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_not_detected_repo(self, mock_es, mock_repo):\n \"\"\"Function: test_not_detected_repo\n\n Description: Test with repository not detected.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n els.repo_name = None\n self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,\n 'ERROR: Repository not detected: reponame3, /dir/path/dump2'))\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': False}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_missing_repo_name(self, mock_es, mock_repo):\n \"\"\"Function: test_missing_repo_name\n\n Description: Test with missing repo named.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n 
els.connect()\n els.repo = None\n self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (True, \n 'ERROR: Missing repo name or' +\n \" directory: 'None', '/dir/path/dump2'\"))\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_no_repo_dir(self, mock_es, mock_repo):\n \"\"\"Function: test_no_repo_dir\n\n Description: Test with no repo directory passed.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(self.repo), (False, None))\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_no_repo_name(self, mock_es, mock_repo):\n \"\"\"Function: test_no_repo_name\n\n Description: Test with no repo named passed.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo2, repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (False, None)\n )\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n 
return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_default(self, mock_es, mock_repo):\n \"\"\"Function: test_default\n\n Description: Test with default settings.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(self.repo2, self.repo_dir), (False,\n None))\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Elasticsearch(object):\n \"\"\"Class: ElasticSearch\n\n Description: Class representation of the Elasticsearch class.\n\n Methods:\n __init__\n\n \"\"\"\n\n def __init__(self, host_list, port=9200):\n \"\"\"Method: __init__\n\n Description: Initialization instance of the class.\n\n Arguments:\n\n \"\"\"\n self.hosts = host_list\n self.port = port\n self.info_status = {'cluster_name': 'ClusterName', 'name': 'servername'\n }\n\n\nclass UnitTest(unittest.TestCase):\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp\n test_not_created_repo\n test_not_detected_repo\n test_missing_repo_name\n test_no_repo_dir\n test_no_repo_name\n test_default\n\n \"\"\"\n\n def setUp(self):\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n self.host_list = ['host1', 'host2']\n self.repo = 'reponame'\n self.repo2 = 'reponame2'\n self.repo3 = 'reponame3'\n self.els = Elasticsearch(self.host_list)\n self.repo_dir = '/dir/path/dump2'\n self.nodes_data = {'serverid1': {'name': 'hostname1', 'settings': {\n 'path': {'data': ['/dir/data1'], 'logs': ['/dir/logs1']}}},\n 'serverid2': {'name': 'hostname2', 'settings': {'path': {'data':\n ['/dir/data2'], 'logs': ['/dir/logs2']}}}}\n self.health_data = {'status': 'green', 'cluster_name': 'ClusterName'}\n self.dump = '/dir/path/dump'\n self.repo_list = {'reponame': {'type': 'dbdump', 'settings': {\n 'location': self.dump}}}\n self.repo_dict = {'reponame': {'type': 'dbdump', 'settings': {\n 'location': self.dump}}}\n self.repo_dict2 = {'reponame': {'type': 'dbdump', 'settings': {\n 'location': self.dump}}, 'reponame2': {'type': 'dbdump',\n 'settings': {'location': '/dir/path/dump2'}}}\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': False}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n 
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_not_created_repo(self, mock_es, mock_repo):\n \"\"\"Function: test_not_created_repo\n\n Description: Test with repository not created.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n els.repo_name = None\n self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,\n 'ERROR: Repository creation failure: ' +\n ' reponame3, /dir/path/dump2'))\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_not_detected_repo(self, mock_es, mock_repo):\n \"\"\"Function: test_not_detected_repo\n\n Description: Test with repository not detected.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n els.repo_name = None\n self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,\n 'ERROR: Repository not detected: reponame3, /dir/path/dump2'))\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': False}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n 
@mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_missing_repo_name(self, mock_es, mock_repo):\n \"\"\"Function: test_missing_repo_name\n\n Description: Test with missing repo named.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n els.repo = None\n self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (True, \n 'ERROR: Missing repo name or' +\n \" directory: 'None', '/dir/path/dump2'\"))\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_no_repo_dir(self, mock_es, mock_repo):\n \"\"\"Function: test_no_repo_dir\n\n Description: Test with no repo directory passed.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(self.repo), (False, None))\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_no_repo_name(self, mock_es, mock_repo):\n \"\"\"Function: test_no_repo_name\n\n Description: Test with no repo named passed.\n\n Arguments:\n\n \"\"\"\n 
mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo2, repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (False, None)\n )\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_default(self, mock_es, mock_repo):\n \"\"\"Function: test_default\n\n Description: Test with default settings.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(self.repo2, self.repo_dir), (False,\n None))\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n\n<mask token>\n",
"step-3": "<mask token>\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n<mask token>\nsys.path.append(os.getcwd())\n<mask token>\n\n\nclass Elasticsearch(object):\n \"\"\"Class: ElasticSearch\n\n Description: Class representation of the Elasticsearch class.\n\n Methods:\n __init__\n\n \"\"\"\n\n def __init__(self, host_list, port=9200):\n \"\"\"Method: __init__\n\n Description: Initialization instance of the class.\n\n Arguments:\n\n \"\"\"\n self.hosts = host_list\n self.port = port\n self.info_status = {'cluster_name': 'ClusterName', 'name': 'servername'\n }\n\n\nclass UnitTest(unittest.TestCase):\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp\n test_not_created_repo\n test_not_detected_repo\n test_missing_repo_name\n test_no_repo_dir\n test_no_repo_name\n test_default\n\n \"\"\"\n\n def setUp(self):\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n self.host_list = ['host1', 'host2']\n self.repo = 'reponame'\n self.repo2 = 'reponame2'\n self.repo3 = 'reponame3'\n self.els = Elasticsearch(self.host_list)\n self.repo_dir = '/dir/path/dump2'\n self.nodes_data = {'serverid1': {'name': 'hostname1', 'settings': {\n 'path': {'data': ['/dir/data1'], 'logs': ['/dir/logs1']}}},\n 'serverid2': {'name': 'hostname2', 'settings': {'path': {'data':\n ['/dir/data2'], 'logs': ['/dir/logs2']}}}}\n self.health_data = {'status': 'green', 'cluster_name': 'ClusterName'}\n self.dump = '/dir/path/dump'\n self.repo_list = {'reponame': {'type': 'dbdump', 'settings': {\n 'location': self.dump}}}\n self.repo_dict = {'reponame': {'type': 'dbdump', 'settings': {\n 'location': self.dump}}}\n self.repo_dict2 = {'reponame': {'type': 'dbdump', 'settings': {\n 'location': self.dump}}, 'reponame2': {'type': 'dbdump',\n 'settings': {'location': '/dir/path/dump2'}}}\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n 
return_value={'acknowledged': False}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_not_created_repo(self, mock_es, mock_repo):\n \"\"\"Function: test_not_created_repo\n\n Description: Test with repository not created.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n els.repo_name = None\n self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,\n 'ERROR: Repository creation failure: ' +\n ' reponame3, /dir/path/dump2'))\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_not_detected_repo(self, mock_es, mock_repo):\n \"\"\"Function: test_not_detected_repo\n\n Description: Test with repository not detected.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n els.repo_name = None\n self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,\n 'ERROR: Repository not detected: reponame3, /dir/path/dump2'))\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': False}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n 
@mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_missing_repo_name(self, mock_es, mock_repo):\n \"\"\"Function: test_missing_repo_name\n\n Description: Test with missing repo named.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n els.repo = None\n self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (True, \n 'ERROR: Missing repo name or' +\n \" directory: 'None', '/dir/path/dump2'\"))\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_no_repo_dir(self, mock_es, mock_repo):\n \"\"\"Function: test_no_repo_dir\n\n Description: Test with no repo directory passed.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(self.repo), (False, None))\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_no_repo_name(self, mock_es, 
mock_repo):\n \"\"\"Function: test_no_repo_name\n\n Description: Test with no repo named passed.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo2, repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (False, None)\n )\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_default(self, mock_es, mock_repo):\n \"\"\"Function: test_default\n\n Description: Test with default settings.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(self.repo2, self.repo_dir), (False,\n None))\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n<mask token>\nsys.path.append(os.getcwd())\n<mask token>\n__version__ = version.__version__\n\n\nclass Elasticsearch(object):\n \"\"\"Class: ElasticSearch\n\n Description: Class representation of the Elasticsearch class.\n\n Methods:\n __init__\n\n \"\"\"\n\n def __init__(self, host_list, port=9200):\n \"\"\"Method: __init__\n\n Description: Initialization instance of the class.\n\n Arguments:\n\n \"\"\"\n self.hosts = host_list\n self.port = port\n self.info_status = {'cluster_name': 'ClusterName', 'name': 'servername'\n }\n\n\nclass UnitTest(unittest.TestCase):\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp\n test_not_created_repo\n test_not_detected_repo\n test_missing_repo_name\n test_no_repo_dir\n test_no_repo_name\n test_default\n\n \"\"\"\n\n def setUp(self):\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n self.host_list = ['host1', 'host2']\n self.repo = 'reponame'\n self.repo2 = 'reponame2'\n self.repo3 = 'reponame3'\n self.els = Elasticsearch(self.host_list)\n self.repo_dir = '/dir/path/dump2'\n self.nodes_data = {'serverid1': {'name': 'hostname1', 'settings': {\n 'path': {'data': ['/dir/data1'], 'logs': ['/dir/logs1']}}},\n 'serverid2': {'name': 'hostname2', 'settings': {'path': {'data':\n ['/dir/data2'], 'logs': ['/dir/logs2']}}}}\n self.health_data = {'status': 'green', 'cluster_name': 'ClusterName'}\n self.dump = '/dir/path/dump'\n self.repo_list = {'reponame': {'type': 'dbdump', 'settings': {\n 'location': self.dump}}}\n self.repo_dict = {'reponame': {'type': 'dbdump', 'settings': {\n 'location': self.dump}}}\n self.repo_dict2 = {'reponame': {'type': 'dbdump', 'settings': {\n 'location': self.dump}}, 'reponame2': {'type': 'dbdump',\n 'settings': {'location': '/dir/path/dump2'}}}\n\n 
@mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': False}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_not_created_repo(self, mock_es, mock_repo):\n \"\"\"Function: test_not_created_repo\n\n Description: Test with repository not created.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n els.repo_name = None\n self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,\n 'ERROR: Repository creation failure: ' +\n ' reponame3, /dir/path/dump2'))\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_not_detected_repo(self, mock_es, mock_repo):\n \"\"\"Function: test_not_detected_repo\n\n Description: Test with repository not detected.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n els.repo_name = None\n self.assertEqual(els.create_repo(self.repo3, self.repo_dir), (True,\n 'ERROR: Repository not detected: reponame3, /dir/path/dump2'))\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': False}))\n @mock.patch('elastic_class.ElasticSearch.update_status', 
mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_missing_repo_name(self, mock_es, mock_repo):\n \"\"\"Function: test_missing_repo_name\n\n Description: Test with missing repo named.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n els.repo = None\n self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (True, \n 'ERROR: Missing repo name or' +\n \" directory: 'None', '/dir/path/dump2'\"))\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_no_repo_dir(self, mock_es, mock_repo):\n \"\"\"Function: test_no_repo_dir\n\n Description: Test with no repo directory passed.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(self.repo), (False, None))\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def 
test_no_repo_name(self, mock_es, mock_repo):\n \"\"\"Function: test_no_repo_name\n\n Description: Test with no repo named passed.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo2, repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(repo_dir=self.repo_dir), (False, None)\n )\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n @mock.patch('elastic_class.create_snapshot_repo', mock.Mock(\n return_value={'acknowledged': True}))\n @mock.patch('elastic_class.ElasticSearch.update_status', mock.Mock(\n return_value=True))\n @mock.patch('elastic_class.is_active', mock.Mock(return_value=True))\n @mock.patch('elastic_class.get_repo_list')\n @mock.patch('elastic_class.elasticsearch.Elasticsearch')\n def test_default(self, mock_es, mock_repo):\n \"\"\"Function: test_default\n\n Description: Test with default settings.\n\n Arguments:\n\n \"\"\"\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.\n repo, repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(self.repo2, self.repo_dir), (False,\n None))\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/python\n# Classification (U)\n\n\"\"\"Program: elasticsearchrepo_create_repo.py\n\n Description: Unit testing of create_repo in\n elastic_class.ElasticSearchRepo class.\n\n Usage:\n test/unit/elastic_class/elasticsearchrepo_create_repo.py\n\n Arguments:\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\nimport sys\nimport os\n\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\n# Third-party\nimport mock\n\n# Local\nsys.path.append(os.getcwd())\nimport elastic_class\nimport version\n\n__version__ = version.__version__\n\n\nclass Elasticsearch(object):\n\n \"\"\"Class: ElasticSearch\n\n Description: Class representation of the Elasticsearch class.\n\n Methods:\n __init__\n\n \"\"\"\n\n def __init__(self, host_list, port=9200):\n\n \"\"\"Method: __init__\n\n Description: Initialization instance of the class.\n\n Arguments:\n\n \"\"\"\n\n self.hosts = host_list\n self.port = port\n self.info_status = {\"cluster_name\": \"ClusterName\",\n \"name\": \"servername\"}\n\n\nclass UnitTest(unittest.TestCase):\n\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp\n test_not_created_repo\n test_not_detected_repo\n test_missing_repo_name\n test_no_repo_dir\n test_no_repo_name\n test_default\n\n \"\"\"\n\n def setUp(self):\n\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n\n self.host_list = [\"host1\", \"host2\"]\n self.repo = \"reponame\"\n self.repo2 = \"reponame2\"\n self.repo3 = \"reponame3\"\n self.els = Elasticsearch(self.host_list)\n self.repo_dir = \"/dir/path/dump2\"\n self.nodes_data = {\"serverid1\": {\"name\": \"hostname1\", \"settings\":\n {\"path\": {\"data\": [\"/dir/data1\"],\n \"logs\": [\"/dir/logs1\"]}}},\n \"serverid2\": {\"name\": \"hostname2\", \"settings\":\n {\"path\": {\"data\": [\"/dir/data2\"],\n \"logs\": [\"/dir/logs2\"]}}}}\n self.health_data = {\"status\": \"green\", 
\"cluster_name\": \"ClusterName\"}\n self.dump = \"/dir/path/dump\"\n self.repo_list = {\"reponame\": {\"type\": \"dbdump\", \"settings\":\n {\"location\": self.dump}}}\n self.repo_dict = {\"reponame\": {\"type\": \"dbdump\", \"settings\":\n {\"location\": self.dump}}}\n self.repo_dict2 = {\"reponame\": {\"type\": \"dbdump\", \"settings\":\n {\"location\": self.dump}},\n \"reponame2\": {\"type\": \"dbdump\", \"settings\":\n {\"location\": \"/dir/path/dump2\"}}}\n\n @mock.patch(\"elastic_class.create_snapshot_repo\",\n mock.Mock(return_value={\"acknowledged\": False}))\n @mock.patch(\"elastic_class.ElasticSearch.update_status\",\n mock.Mock(return_value=True))\n @mock.patch(\"elastic_class.is_active\", mock.Mock(return_value=True))\n @mock.patch(\"elastic_class.get_repo_list\")\n @mock.patch(\"elastic_class.elasticsearch.Elasticsearch\")\n def test_not_created_repo(self, mock_es, mock_repo):\n\n \"\"\"Function: test_not_created_repo\n\n Description: Test with repository not created.\n\n Arguments:\n\n \"\"\"\n\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.repo,\n repo_dir=self.repo_dir)\n els.connect()\n els.repo_name = None\n self.assertEqual(\n els.create_repo(self.repo3, self.repo_dir),\n (True,\n \"ERROR: Repository creation failure: \" +\n \" reponame3, /dir/path/dump2\"))\n\n @mock.patch(\"elastic_class.create_snapshot_repo\",\n mock.Mock(return_value={\"acknowledged\": True}))\n @mock.patch(\"elastic_class.ElasticSearch.update_status\",\n mock.Mock(return_value=True))\n @mock.patch(\"elastic_class.is_active\", mock.Mock(return_value=True))\n @mock.patch(\"elastic_class.get_repo_list\")\n @mock.patch(\"elastic_class.elasticsearch.Elasticsearch\")\n def test_not_detected_repo(self, mock_es, mock_repo):\n\n \"\"\"Function: test_not_detected_repo\n\n Description: Test with repository not detected.\n\n Arguments:\n\n \"\"\"\n\n mock_es.return_value = 
self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.repo,\n repo_dir=self.repo_dir)\n els.connect()\n els.repo_name = None\n self.assertEqual(\n els.create_repo(self.repo3, self.repo_dir),\n (True,\n \"ERROR: Repository not detected: reponame3, /dir/path/dump2\"))\n\n @mock.patch(\"elastic_class.create_snapshot_repo\",\n mock.Mock(return_value={\"acknowledged\": False}))\n @mock.patch(\"elastic_class.ElasticSearch.update_status\",\n mock.Mock(return_value=True))\n @mock.patch(\"elastic_class.is_active\", mock.Mock(return_value=True))\n @mock.patch(\"elastic_class.get_repo_list\")\n @mock.patch(\"elastic_class.elasticsearch.Elasticsearch\")\n def test_missing_repo_name(self, mock_es, mock_repo):\n\n \"\"\"Function: test_missing_repo_name\n\n Description: Test with missing repo named.\n\n Arguments:\n\n \"\"\"\n\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.repo,\n repo_dir=self.repo_dir)\n els.connect()\n els.repo = None\n self.assertEqual(\n els.create_repo(repo_dir=self.repo_dir),\n (True,\n \"ERROR: Missing repo name or\" +\n \" directory: 'None', '/dir/path/dump2'\"))\n\n @mock.patch(\"elastic_class.create_snapshot_repo\",\n mock.Mock(return_value={\"acknowledged\": True}))\n @mock.patch(\"elastic_class.ElasticSearch.update_status\",\n mock.Mock(return_value=True))\n @mock.patch(\"elastic_class.is_active\", mock.Mock(return_value=True))\n @mock.patch(\"elastic_class.get_repo_list\")\n @mock.patch(\"elastic_class.elasticsearch.Elasticsearch\")\n def test_no_repo_dir(self, mock_es, mock_repo):\n\n \"\"\"Function: test_no_repo_dir\n\n Description: Test with no repo directory passed.\n\n Arguments:\n\n \"\"\"\n\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n\n els = elastic_class.ElasticSearchRepo(self.host_list, 
repo=self.repo,\n repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(self.repo), (False, None))\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n @mock.patch(\"elastic_class.create_snapshot_repo\",\n mock.Mock(return_value={\"acknowledged\": True}))\n @mock.patch(\"elastic_class.ElasticSearch.update_status\",\n mock.Mock(return_value=True))\n @mock.patch(\"elastic_class.is_active\", mock.Mock(return_value=True))\n @mock.patch(\"elastic_class.get_repo_list\")\n @mock.patch(\"elastic_class.elasticsearch.Elasticsearch\")\n def test_no_repo_name(self, mock_es, mock_repo):\n\n \"\"\"Function: test_no_repo_name\n\n Description: Test with no repo named passed.\n\n Arguments:\n\n \"\"\"\n\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.repo2,\n repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(repo_dir=self.repo_dir),\n (False, None))\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n @mock.patch(\"elastic_class.create_snapshot_repo\",\n mock.Mock(return_value={\"acknowledged\": True}))\n @mock.patch(\"elastic_class.ElasticSearch.update_status\",\n mock.Mock(return_value=True))\n @mock.patch(\"elastic_class.is_active\", mock.Mock(return_value=True))\n @mock.patch(\"elastic_class.get_repo_list\")\n @mock.patch(\"elastic_class.elasticsearch.Elasticsearch\")\n def test_default(self, mock_es, mock_repo):\n\n \"\"\"Function: test_default\n\n Description: Test with default settings.\n\n Arguments:\n\n \"\"\"\n\n mock_es.return_value = self.els\n mock_repo.side_effect = [self.repo_dict, self.repo_dict2]\n\n els = elastic_class.ElasticSearchRepo(self.host_list, repo=self.repo,\n repo_dir=self.repo_dir)\n els.connect()\n self.assertEqual(els.create_repo(self.repo2, self.repo_dir),\n (False, None))\n self.assertEqual(els.repo_dict, self.repo_dict2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
10,
12,
13,
14,
16
]
}
|
[
10,
12,
13,
14,
16
] |
import math
import pygame
import numpy as np
from main import Snake, SCREEN_WIDTH, SCREEN_HEIGHT, drawGrid, GRIDSIZE
from random import randint
FOOD_REWARD = 5    # score gained when the player's head reaches the food
DEATH_PENALTY = 10  # not referenced in this chunk — presumably applied by a caller; verify
MOVE_PENALTY = 0.1  # score drained on every move that does not eat food
LIVES = 5           # player lives per game; enemy respawns have no limit

SQUARE_COLOR = (80,80,80)  # grid-cell outline color (RGB)
SNAKE_HEAD_COLOR = ((0,51,0), (0,0,153), (102,0,102))  # head colors per snake index
SNAKE_COLOR = ((154,205,50), (50,50,250), (50,0,250))  # body colors per snake index
FOOD_COLOR = (255,69,0)  # food cell color (RGB)
class SnakeGame:
    """Grid snake game: an externally-controlled player vs. a scripted enemy.

    Cells are ``[c0, c1]`` pairs; index 0 and ``board['width']`` /
    ``board['height']`` are lethal wall cells (see ``check_collisions``).
    Actions passed to ``step`` are 0..3 (see ``create_new_point`` for the
    exact coordinate deltas).  The enemy chases the food epsilon-greedily
    and has unlimited lives.
    """

    def __init__(self, board_width=10, board_height=10, gui=False,
                 enemy_epsilon=0.1):
        """Set up an empty game; call start() to place snakes and food.

        :param board_width: playable grid width (walls at 0 and board_width)
        :param board_height: playable grid height (walls at 0 and board_height)
        :param gui: if True, start() also opens a pygame window
        :param enemy_epsilon: probability the enemy moves randomly each step
        """
        self.score = 0
        self.board = {'width': board_width, 'height': board_height}
        self.gui = gui
        self.lives = LIVES
        self.player = []
        self.enemy = []
        self.enemy_epsilon = enemy_epsilon
        self.food = []

    def start(self):
        """Spawn both snakes and the food (and the GUI, if enabled).

        :return: (lives, score, player, enemy, food)
        """
        self.player_init(LIVES)
        self.enemy_init()
        self.generate_food()
        if self.gui:
            self.render_init()
        return self.generate_observations()

    def player_init(self, lives=LIVES):
        """Spawn a fresh 3-segment player snake on the left half of the board.

        :param lives: lives to (re)set on the player — used on respawn so a
            life lost in check_collisions is carried over.
        """
        x = randint(3, math.ceil(self.board["width"] / 2) - 1)
        y = randint(3, self.board["height"] - 3)
        self.player = []
        vertical = randint(0, 1) == 0
        for i in range(3):
            point = [x + i, y] if vertical else [x, y + i]
            self.player.insert(0, point)  # head ends up at (x, y)
        self.lives = lives

    def enemy_init(self):
        """Spawn a fresh 3-segment enemy snake on the right half of the board.

        Retries recursively until the enemy head does not overlap the
        player's body.
        """
        x = randint(math.ceil(self.board["width"] / 2), self.board["width"] - 3)
        y = randint(3, self.board["height"] - 3)
        self.enemy = []
        vertical = randint(0, 1) == 0
        for i in range(3):
            point = [x + i, y] if vertical else [x, y + i]
            self.enemy.insert(0, point)
        if self.enemy[0] in self.player[1:-1]:
            self.enemy_init()  # retry until the spawn cell is clear

    def generate_food(self):
        """Place food on a random cell occupied by neither snake.

        BUGFIX: the upper randint bound now excludes the wall column/row.
        randint is inclusive, and index board['width'] / board['height'] is
        lethal (see check_collisions), so food spawned there could not be
        collected without dying.
        """
        food = []
        while not food:
            food = [randint(1, self.board["width"] - 1),
                    randint(1, self.board["height"] - 1)]
            if food in self.enemy or food in self.player:
                food = []  # occupied — roll again
        self.food = food

    def get_enemy_movement(self):
        """Choose the enemy's action: greedy chase of the food.

        With probability ``enemy_epsilon`` (or when already aligned with the
        food on both axes) the move is random.  Key meanings match
        create_new_point: 0/2 decrement/increment coordinate [0],
        3/1 decrement/increment coordinate [1].

        :return: action key in 0..3
        """
        if np.random.random() <= self.enemy_epsilon:
            return randint(0, 3)
        if self.food[0] > self.enemy[0][0]:
            return 2
        elif self.food[0] < self.enemy[0][0]:
            return 0
        elif self.food[1] > self.enemy[0][1]:
            return 1
        elif self.food[1] < self.enemy[0][1]:
            return 3
        return randint(0, 3)

    def step(self, key):
        """Advance the game one tick with the player's action ``key``.

        0 - UP, 1 - RIGHT, 2 - DOWN, 3 - LEFT

        :param key: player action in 0..3
        :return: (lives, score, player, enemy, food)
        :raises Exception: "Game over" once the player is out of lives
        """
        if self.is_done():
            self.end_game()
        if not self.food:  # defensive: only empty before start()
            self.generate_food()
        self.create_new_point(self.player, key)
        self.create_new_point(self.enemy, self.get_enemy_movement())
        player_ate = False
        if self.food_eaten(self.player):
            self.score += FOOD_REWARD
            self.generate_food()
            player_ate = True
        else:
            self.remove_last_point(self.player)  # no growth this tick
            self.score -= MOVE_PENALTY
        # Player has priority: if both heads reach the food, only the
        # player is credited and the enemy shrinks back as usual.
        if (not player_ate) and self.food_eaten(self.enemy):
            self.generate_food()
        else:
            self.remove_last_point(self.enemy)
        self.check_collisions()
        return self.generate_observations()

    def create_new_point(self, snake, key):
        """Prepend a new head to ``snake``, one cell in direction ``key``."""
        new_point = [snake[0][0], snake[0][1]]
        if key == 0:    # UP
            new_point[0] -= 1
        elif key == 1:  # RIGHT
            new_point[1] += 1
        elif key == 2:  # DOWN
            new_point[0] += 1
        elif key == 3:  # LEFT
            new_point[1] -= 1
        snake.insert(0, new_point)

    def food_eaten(self, snake):
        """Return True if any segment of ``snake`` sits on the food cell."""
        return self.food in snake

    def remove_last_point(self, snake):
        """Drop the tail segment (the snake moved without eating)."""
        snake.pop()

    def check_collisions(self):
        """Apply wall / self / other-snake collisions for both snakes.

        A player collision costs one life and respawns the player (unless
        that was the last life); an enemy collision just respawns the enemy
        — the enemy has unlimited lives.
        """
        player_collided = (
            self.player[0][0] == 0 or
            self.player[0][0] == self.board["width"] or
            self.player[0][1] == 0 or
            self.player[0][1] == self.board["height"] or
            self.player[0] in self.player[1:-1] or
            self.player[0] in self.enemy)
        enemy_collided = (
            self.enemy[0][0] == 0 or
            self.enemy[0][0] == self.board["width"] or
            self.enemy[0][1] == 0 or
            self.enemy[0][1] == self.board["height"] or
            self.enemy[0] in self.player or
            self.enemy[0] in self.enemy[1:-1])
        if player_collided:
            self.lives -= 1
            if not self.is_done():
                self.player_init(self.lives)
        if enemy_collided:
            self.enemy_init()

    def generate_observations(self):
        """:return: (lives, score, player, enemy, food)"""
        return self.lives, self.score, self.player, self.enemy, self.food

    # --- rendering ---------------------------------------------------------

    def render_init(self):
        """Create the pygame window, clock, surface and font for rendering."""
        pygame.init()
        self.clock = pygame.time.Clock()
        self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
        self.surface = pygame.Surface(self.screen.get_size())
        self.surface = self.surface.convert()
        drawGrid(self.surface)
        self.myfont = pygame.font.SysFont("bahnschrift", 20)

    def step_render(self, key):
        """step() plus drawing, throttled to 3 frames per second.

        :param key: player action in 0..3
        :return: (lives, score, player, enemy, food)
        """
        self.clock.tick(3)
        drawGrid(self.surface)
        # step() itself guarantees food exists before and after the tick.
        _lives, _score, _player, _enemy, _food = self.step(key)
        self.draw_snake(self.player, self.surface, SNAKE_COLOR[0], SNAKE_HEAD_COLOR[0])
        self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1], SNAKE_HEAD_COLOR[1])
        self.draw_food(self.surface, FOOD_COLOR)
        self.screen.blit(self.surface, (0, 0))
        text1 = self.myfont.render("Score: {0} Lives: {1}".format(round(self.score, 2), self.lives), True, (250, 250, 250))
        self.screen.blit(text1, (5, 10))
        pygame.display.update()
        return _lives, _score, _player, _enemy, _food

    def draw_snake(self, snake, surface, color, head_color):
        """Draw ``snake``; the first (head) segment uses ``head_color``."""
        for i, p in enumerate(snake):
            curr_color = head_color if i == 0 else color
            r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE, GRIDSIZE))
            pygame.draw.rect(surface, curr_color, r)
            pygame.draw.rect(surface, SQUARE_COLOR, r, 1)  # cell outline

    def draw_food(self, surface, color):
        """Draw the food cell."""
        r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE), (GRIDSIZE, GRIDSIZE))
        pygame.draw.rect(surface, color, r)
        pygame.draw.rect(surface, SQUARE_COLOR, r, 1)

    def is_done(self):
        """Return True once the player has no lives left."""
        return self.lives <= 0

    def render_destroy(self):
        """Report the final score (GUI teardown hook used by end_game)."""
        print("Snake Player Final Score:", self.score)

    def end_game(self):
        """Finish the game and abort the caller's loop.

        :raises Exception: always, to signal "Game over".
        """
        if self.gui:
            self.render_destroy()
        raise Exception("Game over")
|
normal
|
{
"blob_id": "3bb408f2b2ac63a2555258c05844881ccdfc5057",
"index": 5428,
"step-1": "<mask token>\n\n\nclass SnakeGame:\n\n def __init__(self, board_width=10, board_height=10, gui=False,\n enemy_epsilon=0.1):\n self.score = 0\n self.board = {'width': board_width, 'height': board_height}\n self.gui = gui\n self.lives = LIVES\n self.player = []\n self.enemy = []\n self.enemy_epsilon = enemy_epsilon\n self.food = []\n <mask token>\n <mask token>\n <mask token>\n\n def generate_food(self):\n food = []\n while not food:\n food = [randint(1, self.board['width']), randint(1, self.board[\n 'height'])]\n if food in self.enemy:\n food = []\n elif food in self.player:\n food = []\n self.food = food\n <mask token>\n <mask token>\n\n def create_new_point(self, snake, key):\n new_point = [snake[0][0], snake[0][1]]\n if key == 0:\n new_point[0] -= 1\n elif key == 1:\n new_point[1] += 1\n elif key == 2:\n new_point[0] += 1\n elif key == 3:\n new_point[1] -= 1\n snake.insert(0, new_point)\n\n def food_eaten(self, snake):\n return self.food in snake\n\n def remove_last_point(self, snake):\n snake.pop()\n <mask token>\n\n def generate_observations(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n return self.lives, self.score, self.player, self.enemy, self.food\n <mask token>\n\n def render_init(self):\n pygame.init()\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n drawGrid(self.surface)\n self.myfont = pygame.font.SysFont('bahnschrift', 20)\n\n def step_render(self, key):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.clock.tick(3)\n drawGrid(self.surface)\n if not self.food:\n self.generate_food()\n _lives, _score, _player, _enemy, _food = self.step(key)\n self.draw_snake(self.player, self.surface, SNAKE_COLOR[0],\n SNAKE_HEAD_COLOR[0])\n self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1],\n SNAKE_HEAD_COLOR[1])\n if not self.food:\n 
self.generate_food()\n self.draw_food(self.surface, FOOD_COLOR)\n self.screen.blit(self.surface, (0, 0))\n text1 = self.myfont.render('Score: {0} Lives: {1}'.format(round(\n self.score, 2), self.lives), True, (250, 250, 250))\n self.screen.blit(text1, (5, 10))\n pygame.display.update()\n return _lives, _score, _player, _enemy, _food\n\n def draw_snake(self, snake, surface, color, head_color):\n drew_head = False\n for p in snake:\n curr_color = color\n if not drew_head:\n curr_color = head_color\n drew_head = True\n r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE,\n GRIDSIZE))\n pygame.draw.rect(surface, curr_color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def draw_food(self, surface, color):\n r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE),\n (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n <mask token>\n <mask token>\n\n def end_game(self):\n if self.gui:\n self.render_destroy()\n raise Exception('Game over')\n",
"step-2": "<mask token>\n\n\nclass SnakeGame:\n\n def __init__(self, board_width=10, board_height=10, gui=False,\n enemy_epsilon=0.1):\n self.score = 0\n self.board = {'width': board_width, 'height': board_height}\n self.gui = gui\n self.lives = LIVES\n self.player = []\n self.enemy = []\n self.enemy_epsilon = enemy_epsilon\n self.food = []\n\n def start(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.player_init(LIVES)\n self.enemy_init()\n self.generate_food()\n if self.gui:\n self.render_init()\n return self.generate_observations()\n <mask token>\n <mask token>\n\n def generate_food(self):\n food = []\n while not food:\n food = [randint(1, self.board['width']), randint(1, self.board[\n 'height'])]\n if food in self.enemy:\n food = []\n elif food in self.player:\n food = []\n self.food = food\n <mask token>\n <mask token>\n\n def create_new_point(self, snake, key):\n new_point = [snake[0][0], snake[0][1]]\n if key == 0:\n new_point[0] -= 1\n elif key == 1:\n new_point[1] += 1\n elif key == 2:\n new_point[0] += 1\n elif key == 3:\n new_point[1] -= 1\n snake.insert(0, new_point)\n\n def food_eaten(self, snake):\n return self.food in snake\n\n def remove_last_point(self, snake):\n snake.pop()\n\n def check_collisions(self):\n state = 0\n player_collided = False\n enemy_collided = False\n if self.player[0][0] == 0 or self.player[0][0] == self.board['width'\n ] or self.player[0][1] == 0 or self.player[0][1] == self.board[\n 'height'] or self.player[0] in self.player[1:-1] or self.player[0\n ] in self.enemy:\n player_collided = True\n if self.enemy[0][0] == 0 or self.enemy[0][0] == self.board['width'\n ] or self.enemy[0][1] == 0 or self.enemy[0][1] == self.board[\n 'height'] or self.enemy[0] in self.player or self.enemy[0\n ] in self.enemy[1:-1]:\n enemy_collided = True\n if player_collided:\n self.lives -= 1\n if not self.is_done():\n self.player_init(self.lives)\n if enemy_collided:\n self.enemy_init()\n\n def 
generate_observations(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n return self.lives, self.score, self.player, self.enemy, self.food\n <mask token>\n\n def render_init(self):\n pygame.init()\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n drawGrid(self.surface)\n self.myfont = pygame.font.SysFont('bahnschrift', 20)\n\n def step_render(self, key):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.clock.tick(3)\n drawGrid(self.surface)\n if not self.food:\n self.generate_food()\n _lives, _score, _player, _enemy, _food = self.step(key)\n self.draw_snake(self.player, self.surface, SNAKE_COLOR[0],\n SNAKE_HEAD_COLOR[0])\n self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1],\n SNAKE_HEAD_COLOR[1])\n if not self.food:\n self.generate_food()\n self.draw_food(self.surface, FOOD_COLOR)\n self.screen.blit(self.surface, (0, 0))\n text1 = self.myfont.render('Score: {0} Lives: {1}'.format(round(\n self.score, 2), self.lives), True, (250, 250, 250))\n self.screen.blit(text1, (5, 10))\n pygame.display.update()\n return _lives, _score, _player, _enemy, _food\n\n def draw_snake(self, snake, surface, color, head_color):\n drew_head = False\n for p in snake:\n curr_color = color\n if not drew_head:\n curr_color = head_color\n drew_head = True\n r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE,\n GRIDSIZE))\n pygame.draw.rect(surface, curr_color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def draw_food(self, surface, color):\n r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE),\n (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n <mask token>\n <mask token>\n\n def end_game(self):\n if self.gui:\n self.render_destroy()\n raise Exception('Game over')\n",
"step-3": "<mask token>\n\n\nclass SnakeGame:\n\n def __init__(self, board_width=10, board_height=10, gui=False,\n enemy_epsilon=0.1):\n self.score = 0\n self.board = {'width': board_width, 'height': board_height}\n self.gui = gui\n self.lives = LIVES\n self.player = []\n self.enemy = []\n self.enemy_epsilon = enemy_epsilon\n self.food = []\n\n def start(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.player_init(LIVES)\n self.enemy_init()\n self.generate_food()\n if self.gui:\n self.render_init()\n return self.generate_observations()\n\n def player_init(self, lives=LIVES):\n x = randint(3, math.ceil(self.board['width'] / 2) - 1)\n y = randint(3, self.board['height'] - 3)\n self.player = []\n vertical = randint(0, 1) == 0\n for i in range(3):\n point = [x + i, y] if vertical else [x, y + i]\n self.player.insert(0, point)\n self.lives = lives\n\n def enemy_init(self):\n x = randint(math.ceil(self.board['width'] / 2), self.board['width'] - 3\n )\n y = randint(3, self.board['height'] - 3)\n self.enemy = []\n vertical = randint(0, 1) == 0\n for i in range(3):\n point = [x + i, y] if vertical else [x, y + i]\n self.enemy.insert(0, point)\n if self.enemy[0] in self.player[1:-1]:\n self.enemy_init()\n\n def generate_food(self):\n food = []\n while not food:\n food = [randint(1, self.board['width']), randint(1, self.board[\n 'height'])]\n if food in self.enemy:\n food = []\n elif food in self.player:\n food = []\n self.food = food\n\n def get_enemy_movement(self):\n \"\"\"\n 0 - UP, (-1, 0)\n 1 - RIGHT, (\n 2 - DOWN,\n 3 - LEFT\n \"\"\"\n if np.random.random() <= self.enemy_epsilon:\n return randint(0, 3)\n if self.food[0] > self.enemy[0][0]:\n return 2\n elif self.food[0] < self.enemy[0][0]:\n return 0\n elif self.food[1] > self.enemy[0][1]:\n return 1\n elif self.food[1] < self.enemy[0][1]:\n return 3\n return randint(0, 3)\n\n def step(self, key):\n \"\"\"\n 0 - UP,\n 1 - RIGHT,\n 2 - DOWN,\n 3 - LEFT\n :param key:\n :return: [lives, 
score, player, enemy, food]\n \"\"\"\n if self.is_done():\n self.end_game()\n if not self.food:\n self.generate_food()\n self.create_new_point(self.player, key)\n self.create_new_point(self.enemy, self.get_enemy_movement())\n player_ate = False\n if self.food_eaten(self.player):\n self.score += FOOD_REWARD\n self.generate_food()\n player_ate = True\n else:\n self.remove_last_point(self.player)\n self.score -= MOVE_PENALTY\n if not player_ate and self.food_eaten(self.enemy):\n self.generate_food()\n else:\n self.remove_last_point(self.enemy)\n self.check_collisions()\n if not self.food:\n self.generate_food()\n return self.generate_observations()\n\n def create_new_point(self, snake, key):\n new_point = [snake[0][0], snake[0][1]]\n if key == 0:\n new_point[0] -= 1\n elif key == 1:\n new_point[1] += 1\n elif key == 2:\n new_point[0] += 1\n elif key == 3:\n new_point[1] -= 1\n snake.insert(0, new_point)\n\n def food_eaten(self, snake):\n return self.food in snake\n\n def remove_last_point(self, snake):\n snake.pop()\n\n def check_collisions(self):\n state = 0\n player_collided = False\n enemy_collided = False\n if self.player[0][0] == 0 or self.player[0][0] == self.board['width'\n ] or self.player[0][1] == 0 or self.player[0][1] == self.board[\n 'height'] or self.player[0] in self.player[1:-1] or self.player[0\n ] in self.enemy:\n player_collided = True\n if self.enemy[0][0] == 0 or self.enemy[0][0] == self.board['width'\n ] or self.enemy[0][1] == 0 or self.enemy[0][1] == self.board[\n 'height'] or self.enemy[0] in self.player or self.enemy[0\n ] in self.enemy[1:-1]:\n enemy_collided = True\n if player_collided:\n self.lives -= 1\n if not self.is_done():\n self.player_init(self.lives)\n if enemy_collided:\n self.enemy_init()\n\n def generate_observations(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n return self.lives, self.score, self.player, self.enemy, self.food\n <mask token>\n\n def render_init(self):\n pygame.init()\n self.clock = 
pygame.time.Clock()\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n drawGrid(self.surface)\n self.myfont = pygame.font.SysFont('bahnschrift', 20)\n\n def step_render(self, key):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.clock.tick(3)\n drawGrid(self.surface)\n if not self.food:\n self.generate_food()\n _lives, _score, _player, _enemy, _food = self.step(key)\n self.draw_snake(self.player, self.surface, SNAKE_COLOR[0],\n SNAKE_HEAD_COLOR[0])\n self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1],\n SNAKE_HEAD_COLOR[1])\n if not self.food:\n self.generate_food()\n self.draw_food(self.surface, FOOD_COLOR)\n self.screen.blit(self.surface, (0, 0))\n text1 = self.myfont.render('Score: {0} Lives: {1}'.format(round(\n self.score, 2), self.lives), True, (250, 250, 250))\n self.screen.blit(text1, (5, 10))\n pygame.display.update()\n return _lives, _score, _player, _enemy, _food\n\n def draw_snake(self, snake, surface, color, head_color):\n drew_head = False\n for p in snake:\n curr_color = color\n if not drew_head:\n curr_color = head_color\n drew_head = True\n r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE,\n GRIDSIZE))\n pygame.draw.rect(surface, curr_color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def draw_food(self, surface, color):\n r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE),\n (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n <mask token>\n <mask token>\n\n def end_game(self):\n if self.gui:\n self.render_destroy()\n raise Exception('Game over')\n",
"step-4": "<mask token>\nFOOD_REWARD = 5\nDEATH_PENALTY = 10\nMOVE_PENALTY = 0.1\nLIVES = 5\nSQUARE_COLOR = 80, 80, 80\nSNAKE_HEAD_COLOR = (0, 51, 0), (0, 0, 153), (102, 0, 102)\nSNAKE_COLOR = (154, 205, 50), (50, 50, 250), (50, 0, 250)\nFOOD_COLOR = 255, 69, 0\n\n\nclass SnakeGame:\n\n def __init__(self, board_width=10, board_height=10, gui=False,\n enemy_epsilon=0.1):\n self.score = 0\n self.board = {'width': board_width, 'height': board_height}\n self.gui = gui\n self.lives = LIVES\n self.player = []\n self.enemy = []\n self.enemy_epsilon = enemy_epsilon\n self.food = []\n\n def start(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.player_init(LIVES)\n self.enemy_init()\n self.generate_food()\n if self.gui:\n self.render_init()\n return self.generate_observations()\n\n def player_init(self, lives=LIVES):\n x = randint(3, math.ceil(self.board['width'] / 2) - 1)\n y = randint(3, self.board['height'] - 3)\n self.player = []\n vertical = randint(0, 1) == 0\n for i in range(3):\n point = [x + i, y] if vertical else [x, y + i]\n self.player.insert(0, point)\n self.lives = lives\n\n def enemy_init(self):\n x = randint(math.ceil(self.board['width'] / 2), self.board['width'] - 3\n )\n y = randint(3, self.board['height'] - 3)\n self.enemy = []\n vertical = randint(0, 1) == 0\n for i in range(3):\n point = [x + i, y] if vertical else [x, y + i]\n self.enemy.insert(0, point)\n if self.enemy[0] in self.player[1:-1]:\n self.enemy_init()\n\n def generate_food(self):\n food = []\n while not food:\n food = [randint(1, self.board['width']), randint(1, self.board[\n 'height'])]\n if food in self.enemy:\n food = []\n elif food in self.player:\n food = []\n self.food = food\n\n def get_enemy_movement(self):\n \"\"\"\n 0 - UP, (-1, 0)\n 1 - RIGHT, (\n 2 - DOWN,\n 3 - LEFT\n \"\"\"\n if np.random.random() <= self.enemy_epsilon:\n return randint(0, 3)\n if self.food[0] > self.enemy[0][0]:\n return 2\n elif self.food[0] < self.enemy[0][0]:\n return 0\n 
elif self.food[1] > self.enemy[0][1]:\n return 1\n elif self.food[1] < self.enemy[0][1]:\n return 3\n return randint(0, 3)\n\n def step(self, key):\n \"\"\"\n 0 - UP,\n 1 - RIGHT,\n 2 - DOWN,\n 3 - LEFT\n :param key:\n :return: [lives, score, player, enemy, food]\n \"\"\"\n if self.is_done():\n self.end_game()\n if not self.food:\n self.generate_food()\n self.create_new_point(self.player, key)\n self.create_new_point(self.enemy, self.get_enemy_movement())\n player_ate = False\n if self.food_eaten(self.player):\n self.score += FOOD_REWARD\n self.generate_food()\n player_ate = True\n else:\n self.remove_last_point(self.player)\n self.score -= MOVE_PENALTY\n if not player_ate and self.food_eaten(self.enemy):\n self.generate_food()\n else:\n self.remove_last_point(self.enemy)\n self.check_collisions()\n if not self.food:\n self.generate_food()\n return self.generate_observations()\n\n def create_new_point(self, snake, key):\n new_point = [snake[0][0], snake[0][1]]\n if key == 0:\n new_point[0] -= 1\n elif key == 1:\n new_point[1] += 1\n elif key == 2:\n new_point[0] += 1\n elif key == 3:\n new_point[1] -= 1\n snake.insert(0, new_point)\n\n def food_eaten(self, snake):\n return self.food in snake\n\n def remove_last_point(self, snake):\n snake.pop()\n\n def check_collisions(self):\n state = 0\n player_collided = False\n enemy_collided = False\n if self.player[0][0] == 0 or self.player[0][0] == self.board['width'\n ] or self.player[0][1] == 0 or self.player[0][1] == self.board[\n 'height'] or self.player[0] in self.player[1:-1] or self.player[0\n ] in self.enemy:\n player_collided = True\n if self.enemy[0][0] == 0 or self.enemy[0][0] == self.board['width'\n ] or self.enemy[0][1] == 0 or self.enemy[0][1] == self.board[\n 'height'] or self.enemy[0] in self.player or self.enemy[0\n ] in self.enemy[1:-1]:\n enemy_collided = True\n if player_collided:\n self.lives -= 1\n if not self.is_done():\n self.player_init(self.lives)\n if enemy_collided:\n self.enemy_init()\n\n def 
generate_observations(self):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n return self.lives, self.score, self.player, self.enemy, self.food\n \"\"\"Methods for Rendering the game\"\"\"\n\n def render_init(self):\n pygame.init()\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n drawGrid(self.surface)\n self.myfont = pygame.font.SysFont('bahnschrift', 20)\n\n def step_render(self, key):\n \"\"\"\n :return: [lives, score, player, enemy, food]\n \"\"\"\n self.clock.tick(3)\n drawGrid(self.surface)\n if not self.food:\n self.generate_food()\n _lives, _score, _player, _enemy, _food = self.step(key)\n self.draw_snake(self.player, self.surface, SNAKE_COLOR[0],\n SNAKE_HEAD_COLOR[0])\n self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1],\n SNAKE_HEAD_COLOR[1])\n if not self.food:\n self.generate_food()\n self.draw_food(self.surface, FOOD_COLOR)\n self.screen.blit(self.surface, (0, 0))\n text1 = self.myfont.render('Score: {0} Lives: {1}'.format(round(\n self.score, 2), self.lives), True, (250, 250, 250))\n self.screen.blit(text1, (5, 10))\n pygame.display.update()\n return _lives, _score, _player, _enemy, _food\n\n def draw_snake(self, snake, surface, color, head_color):\n drew_head = False\n for p in snake:\n curr_color = color\n if not drew_head:\n curr_color = head_color\n drew_head = True\n r = pygame.Rect((p[0] * GRIDSIZE, p[1] * GRIDSIZE), (GRIDSIZE,\n GRIDSIZE))\n pygame.draw.rect(surface, curr_color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def draw_food(self, surface, color):\n r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE),\n (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def is_done(self):\n return self.lives <= 0\n\n def render_destroy(self):\n print('Snake Player Final Score:', 
self.score)\n\n def end_game(self):\n if self.gui:\n self.render_destroy()\n raise Exception('Game over')\n",
"step-5": "import math\n\nimport pygame\nimport numpy as np\nfrom main import Snake, SCREEN_WIDTH, SCREEN_HEIGHT, drawGrid, GRIDSIZE\nfrom random import randint\n\nFOOD_REWARD = 5\nDEATH_PENALTY = 10\nMOVE_PENALTY = 0.1\nLIVES = 5\n\nSQUARE_COLOR = (80,80,80)\nSNAKE_HEAD_COLOR = ((0,51,0), (0,0,153), (102,0,102))\nSNAKE_COLOR = ((154,205,50), (50,50,250), (50,0,250))\nFOOD_COLOR = (255,69,0)\n\nclass SnakeGame:\n def __init__(self, board_width = 10, board_height = 10, gui = False, enemy_epsilon=0.1):\n self.score = 0\n self.board = {'width': board_width, 'height': board_height}\n self.gui = gui\n self.lives = LIVES\n self.player = []\n self.enemy = []\n self.enemy_epsilon = enemy_epsilon\n self.food = []\n\n def start(self):\n '''\n :return: [lives, score, player, enemy, food]\n '''\n self.player_init(LIVES)\n self.enemy_init()\n self.generate_food()\n if self.gui: self.render_init()\n return self.generate_observations()\n\n def player_init(self, lives=LIVES):\n x = randint(3, math.ceil(self.board[\"width\"] / 2) - 1)\n y = randint(3, self.board[\"height\"] - 3)\n self.player = []\n vertical = randint(0, 1) == 0\n for i in range(3):\n point = [x + i, y] if vertical else [x, y + i]\n self.player.insert(0, point)\n self.lives = lives\n\n def enemy_init(self):\n x = randint(math.ceil(self.board[\"width\"] / 2), self.board[\"width\"] - 3)\n y = randint(3, self.board[\"height\"] - 3)\n self.enemy = []\n vertical = randint(0, 1) == 0\n for i in range(3):\n point = [x + i, y] if vertical else [x, y + i]\n self.enemy.insert(0, point)\n\n if self.enemy[0] in self.player[1:-1]:\n self.enemy_init() # retry\n\n def generate_food(self):\n food = []\n while not food:\n food = [randint(1, self.board[\"width\"]), randint(1, self.board[\"height\"])]\n if food in self.enemy: food = []\n elif food in self.player: food = []\n self.food = food\n\n def get_enemy_movement(self):\n '''\n 0 - UP, (-1, 0)\n 1 - RIGHT, (\n 2 - DOWN,\n 3 - LEFT\n '''\n if np.random.random() <= 
self.enemy_epsilon:\n return randint(0, 3)\n\n if self.food[0] > self.enemy[0][0]:\n return 2\n elif self.food[0] < self.enemy[0][0]:\n return 0\n elif self.food[1] > self.enemy[0][1]:\n return 1\n elif self.food[1] < self.enemy[0][1]:\n return 3\n\n return randint(0, 3)\n\n def step(self, key):\n '''\n 0 - UP,\n 1 - RIGHT,\n 2 - DOWN,\n 3 - LEFT\n :param key:\n :return: [lives, score, player, enemy, food]\n '''\n\n if self.is_done() :\n self.end_game()\n\n if not self.food:\n self.generate_food()\n\n self.create_new_point(self.player, key)\n self.create_new_point(self.enemy, self.get_enemy_movement())\n\n player_ate = False\n if self.food_eaten(self.player):\n self.score += FOOD_REWARD\n self.generate_food()\n player_ate = True\n else:\n self.remove_last_point(self.player)\n self.score -= MOVE_PENALTY\n\n if (not player_ate) and self.food_eaten(self.enemy):\n self.generate_food()\n else:\n self.remove_last_point(self.enemy)\n\n self.check_collisions()\n\n if not self.food:\n self.generate_food()\n\n return self.generate_observations()\n\n def create_new_point(self, snake, key):\n new_point = [snake[0][0], snake[0][1]]\n if key == 0: # UP\n new_point[0] -= 1\n elif key == 1: # RIGHT\n new_point[1] += 1\n elif key == 2: # DOWN\n new_point[0] += 1\n elif key == 3: # LEFT\n new_point[1] -= 1\n snake.insert(0, new_point)\n\n def food_eaten(self, snake):\n return self.food in snake\n\n def remove_last_point(self, snake):\n snake.pop()\n\n\n def check_collisions(self):\n\n state = 0\n # 0 -> no collision,\n # 1 -> player collision,\n # 2 -> enemy collision\n\n player_collided = False\n enemy_collided = False\n\n if (self.player[0][0] == 0 or\n self.player[0][0] == self.board[\"width\"] or\n self.player[0][1] == 0 or\n self.player[0][1] == self.board[\"height\"] or\n self.player[0] in self.player[1:-1] or\n self.player[0] in self.enemy):\n player_collided = True\n\n if (self.enemy[0][0] == 0 or\n self.enemy[0][0] == self.board[\"width\"] or\n self.enemy[0][1] == 0 or\n 
self.enemy[0][1] == self.board[\"height\"] or\n self.enemy[0] in self.player or\n self.enemy[0] in self.enemy[1:-1]):\n enemy_collided = True\n\n if player_collided:\n self.lives -= 1\n if not self.is_done():\n self.player_init(self.lives)\n\n if enemy_collided:\n self.enemy_init() # enemy moves randomly but has infinite lives\n\n def generate_observations(self):\n '''\n :return: [lives, score, player, enemy, food]\n '''\n return self.lives, self.score, self.player, self.enemy, self.food\n\n '''Methods for Rendering the game'''\n\n def render_init(self):\n pygame.init()\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)\n\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n drawGrid(self.surface)\n self.myfont = pygame.font.SysFont(\"bahnschrift\", 20)\n\n def step_render(self, key):\n '''\n :return: [lives, score, player, enemy, food]\n '''\n self.clock.tick(3)\n drawGrid(self.surface)\n\n if not self.food:\n self.generate_food()\n\n _lives, _score, _player, _enemy, _food = self.step(key)\n\n self.draw_snake(self.player, self.surface, SNAKE_COLOR[0], SNAKE_HEAD_COLOR[0])\n self.draw_snake(self.enemy, self.surface, SNAKE_COLOR[1], SNAKE_HEAD_COLOR[1])\n\n if not self.food:\n self.generate_food()\n\n self.draw_food(self.surface, FOOD_COLOR)\n\n self.screen.blit(self.surface, (0, 0))\n text1 = self.myfont.render(\"Score: {0} Lives: {1}\".format(round(self.score, 2), self.lives), True, (250, 250, 250))\n # text2 = myfont.render(\"Score AI {0}\".format(enemy.score), 1, (250, 250, 250))\n self.screen.blit(text1, (5, 10))\n # screen.blit(text2, (SCREEN_WIDTH - 120, 10))\n pygame.display.update()\n return _lives, _score, _player, _enemy, _food\n\n def draw_snake(self, snake, surface, color, head_color):\n drew_head = False\n for p in snake:\n curr_color = color\n if not drew_head:\n curr_color = head_color\n drew_head = True\n\n r = pygame.Rect((p[0]*GRIDSIZE, 
p[1]*GRIDSIZE), (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, curr_color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def draw_food(self, surface, color):\n r = pygame.Rect((self.food[0] * GRIDSIZE, self.food[1] * GRIDSIZE), (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, color, r)\n pygame.draw.rect(surface, SQUARE_COLOR, r, 1)\n\n def is_done(self):\n return self.lives <= 0\n\n def render_destroy(self):\n print(\"Snake Player Final Score:\", self.score)\n\n def end_game(self):\n if self.gui: self.render_destroy()\n raise Exception(\"Game over\")",
"step-ids": [
12,
14,
18,
22,
24
]
}
|
[
12,
14,
18,
22,
24
] |
"""Insert one row into ``mydb.tb1`` through pymysql.

Fixes over the original script:
* the INSERT used ``%``-string interpolation to splice values directly
  into the SQL text, which is vulnerable to SQL injection and breaks on
  quotes in the data — values are now passed as driver-side parameters;
* the bare ``except:`` (which also swallowed KeyboardInterrupt and
  programming errors) is narrowed to ``pymysql.MySQLError``;
* ``db.close()`` now runs in ``finally`` so the connection is released
  even when an unexpected exception escapes.
"""
import pymysql

# Connect to the local MySQL server (utf8mb4 for full Unicode support).
db = pymysql.connect(
    host='localhost',
    port=3306,
    user='root',
    password='Wubaba950823',
    database='mydb',
    charset='utf8mb4',
)

# Cursor used to run the statement.
cursor = db.cursor()

# Parameterized INSERT: the driver quotes/escapes each value safely.
sql = "INSERT INTO tb1(name,type,time) VALUES (%s, %s, %s)"
params = ('test3', '经典', '2019/12/14')
print(sql, params)
try:
    cursor.execute(sql, params)
    # Persist the inserted row.
    db.commit()
except pymysql.MySQLError:
    # Undo the partial transaction on any database error.
    db.rollback()
finally:
    # Always release the connection, even after an error.
    db.close()
|
normal
|
{
"blob_id": "8566e30a6450a72a0e441155321bd03363944b5a",
"index": 8236,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(sql)\ntry:\n cursor.execute(sql)\n db.commit()\nexcept:\n db.rollback()\ndb.close()\n",
"step-3": "<mask token>\ndb = pymysql.connect(host='localhost', port=3306, user='root', password=\n 'Wubaba950823', database='mydb', charset='utf8mb4')\ncursor = db.cursor()\nsql = \"INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')\" % ('test3',\n '经典', '2019/12/14')\nprint(sql)\ntry:\n cursor.execute(sql)\n db.commit()\nexcept:\n db.rollback()\ndb.close()\n",
"step-4": "import pymysql\ndb = pymysql.connect(host='localhost', port=3306, user='root', password=\n 'Wubaba950823', database='mydb', charset='utf8mb4')\ncursor = db.cursor()\nsql = \"INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')\" % ('test3',\n '经典', '2019/12/14')\nprint(sql)\ntry:\n cursor.execute(sql)\n db.commit()\nexcept:\n db.rollback()\ndb.close()\n",
"step-5": "import pymysql\n\ndb= pymysql.connect(host = 'localhost',\n port = 3306,\n user = 'root',\n password = 'Wubaba950823',\n database = 'mydb',\n charset = 'utf8mb4'\n )\n \n# 使用cursor()方法获取操作游标 \ncursor = db.cursor()\n\n# SQL 插入语句 里面的数据类型要对应\nsql = \"INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')\" % ('test3','经典','2019/12/14')\nprint(sql)\ntry:\n # 执行sql语句\n cursor.execute(sql)\n # 执行sql语句\n db.commit()\nexcept:\n # 发生错误时回滚\n db.rollback()\n \n# 关闭数据库连接\ndb.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import socket
import sys
from datetime import datetime
from threading import Thread
import logging
class RRConnection():
    """Minimal TCP emulation of a RACE RESULT-style timing box.

    Listens on port 3601, accepts one client at a time, parses CRLF-separated
    text commands and answers them.  Passings are injected via addPassing()
    and either pushed to the client (``SETPUSHPASSINGS;1;...``) or fetched on
    request (``<first>:<count>`` / ``<number>``).

    Fixes over the original:
    * ``SETPUSHPASSINGS`` assigned to a non-existent ``self.notify``
      attribute when disabling push mode, so pushing could never be
      switched off again — it now writes ``self._notify``;
    * ``stop()`` crashed with AttributeError when no client had ever
      connected (``self._inSock`` is still None);
    * the listener socket now sets SO_REUSEADDR so the server can be
      restarted while the old socket lingers in TIME_WAIT.
    """

    def __init__(self):
        # Server socket that accepts the (single) client connection.
        self._listenerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick restarts while the previous socket is in TIME_WAIT.
        self._listenerSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._inSock = None                 # socket of the connected client, if any
        self._inThread = Thread(target=self.inLoop)
        self._isRunning = True              # loop flag checked by inLoop()
        self._notify = False                # push new passings to the client?
        self._allPassings = []              # formatted records; passing N is index N-1

    def start(self):
        """Start the background accept/receive loop."""
        logging.debug("Starting thread for in-loop")
        self._inThread.start()

    def stop(self):
        """Stop the receive loop and close both sockets."""
        self._isRunning = False
        self._listenerSocket.close()
        # Fix: _inSock stays None until a client connects; closing it
        # unconditionally raised AttributeError.
        if self._inSock:
            self._inSock.close()

    def inLoop(self):
        """Accept one client at a time and feed its data to parseCommand().

        Runs until stop() clears ``self._isRunning``.  An empty recv()
        (peer closed) or a reset drops back to accept() for a new client.
        """
        self._listenerSocket.bind(('', 3601))
        self._listenerSocket.listen(1)
        while self._isRunning:
            logging.debug("Starting listener socket on port 3601")
            self._inSock, addr = self._listenerSocket.accept()
            try:
                logging.debug("Got connection from {}".format(addr))
                keepReceiving = True
                while keepReceiving:
                    received = self._inSock.recv(1024 * 1024)
                    if len(received) > 0:
                        self.parseCommand(received.decode())
                    else:
                        # Zero-length read: the peer closed the connection.
                        keepReceiving = False
            except ConnectionResetError:
                logging.debug("Connection closed, retry")

    def parseCommand(self, cmd):
        """Dispatch each CRLF-separated command contained in *cmd*.

        Three forms are understood:
        * ``NAME;arg;...``   -- dispatched to the method of that name,
        * ``<first>:<count>``-- send *count* passings starting at *first*,
        * ``<number>``       -- send the single passing *number*.
        Anything else is only logged.
        """
        allCmd = cmd.strip().split("\r\n")
        for oneCmd in allCmd:
            if oneCmd.strip() != "":
                logging.debug("Parsing command {}".format(oneCmd))
                f = oneCmd.split(';')
                if hasattr(self, f[0].strip()):
                    # Command name matches a handler method: call it with
                    # the full command line.
                    getattr(self, f[0].strip())(oneCmd)
                elif ":" in oneCmd:
                    numbers = oneCmd.split(':')
                    self.sendPassings(int(numbers[0]), int(numbers[1]))
                elif oneCmd.isdigit():
                    self.sendPassings(int(oneCmd), 1)
                else:
                    logging.debug("Function {} not known: {}".format(f[0], cmd))

    def sendAnswer(self, answer):
        """Send *answer* (CRLF-terminated) to the connected client, if any."""
        if self._inSock:
            logging.debug("Sending: {}".format(answer))
            fullAnswer = answer + "\r\n"
            try:
                self._inSock.send(fullAnswer.encode())
            except socket.error:
                logging.debug("Send error!")
        else:
            logging.debug("Not connected!")

    def addPassing(self, Bib, Date, Time):
        """Record a new passing for bib *Bib* at *Date*/*Time*.

        The record is appended to ``self._allPassings`` in the box's
        semicolon-separated wire format and, when push mode is on,
        immediately sent to the client prefixed with ``#P;``.
        """
        PassingNo = len(self._allPassings) + 1
        # Fixed/dummy protocol fields for the emulated box.
        EventID = "143722"
        Hits = "1"
        MaxRSSI = "31"
        InternalData = ""
        IsActive = "0"
        Channel = "1"
        LoopID = ""
        LoopOnly = ""
        WakeupCounter = ""
        Battery = ""
        Temperature = ""
        InternalActiveData = ""
        BoxName = "SwimBox"
        FileNumber = "1"
        MaxRSSIAntenna = "1"
        BoxId = "1"
        entry = ";".join(str(field) for field in (
            PassingNo, Bib, Date, Time, EventID, Hits, MaxRSSI,
            InternalData, IsActive, Channel, LoopID, LoopOnly,
            WakeupCounter, Battery, Temperature, InternalActiveData,
            BoxName, FileNumber, MaxRSSIAntenna, BoxId))
        self._allPassings.append(entry)
        if self._notify:
            self.sendAnswer("#P;{}".format(entry))

    def sendPassings(self, number, count):
        """Send *count* stored passings starting at 1-based index *number*.

        Answers ``ONLY <n>`` when the requested range exceeds the store.
        """
        if number + count - 1 > len(self._allPassings):
            self.sendAnswer("ONLY {}".format(len(self._allPassings)))
        else:
            for i in range(number - 1, number + count - 1):
                self.sendAnswer(self._allPassings[i])

    def SETPROTOCOL(self, str):
        """Acknowledge the protocol negotiation with version 2.0."""
        logging.debug("Set protocol: {}".format(str))
        self.sendAnswer("SETPROTOCOL;2.0")

    def GETSTATUS(self, str):
        """Answer a GETSTATUS request with a mostly static status line.

        Field order follows the box protocol; only date/time are live,
        the rest are fixed dummy values for the emulator.
        """
        logging.debug("Get Status: {}".format(str))
        Date = datetime.now().strftime("%Y-%m-%d")
        Time = datetime.now().strftime("%H:%M:%S.%f")
        HasPower = "0"
        Antennas = "10000000"
        IsInOperationMode = "1"
        FileNumber = "1"
        GPSHasFix = "0"
        Latitude = "0.0"
        Longitude = "0.0"
        ReaderIsHealthy = "1"
        BatteryCharge = "100"
        BoardTemperature = "20"
        ReaderTemperature = "20"
        UHFFrequency = "0"
        ActiveExtConnected = "0"
        Channel = ""
        LoopID = ""
        LoopPower = ""
        LoopConnected = ""
        LoopUnderPower = ""
        TimeIsRunning = "1"
        TimeSource = "0"
        ScheduledStandbyEnabled = "0"
        IsInStandby = "0"
        ErrorFlags = "0"
        # Latitude/longitude share one comma-joined field on the wire.
        fields = [Date, Time, HasPower, Antennas, IsInOperationMode,
                  FileNumber, GPSHasFix,
                  "{},{}".format(Latitude, Longitude),
                  ReaderIsHealthy, BatteryCharge, BoardTemperature,
                  ReaderTemperature, UHFFrequency, ActiveExtConnected,
                  Channel, LoopID, LoopPower, LoopConnected,
                  LoopUnderPower, TimeIsRunning, TimeSource,
                  ScheduledStandbyEnabled, IsInStandby, ErrorFlags]
        self.sendAnswer("GETSTATUS;" + ";".join(fields))

    def GETCONFIG(self, s):
        """Answer a GETCONFIG;<category>;<key> request with fixed values."""
        parts = s.split(";")
        if parts[1] == "GENERAL":
            if parts[2] == "BOXNAME":
                self.sendAnswer(s.strip() + ";SwimBox;1")
            elif parts[2] == "TIMEZONE":
                self.sendAnswer(s.strip() + ";Europe/Amsterdam")
            else:
                logging.debug("Unknown general request: {}".format(parts[2]))
                self.sendAnswer(s.strip() + ";ERROR")
        elif parts[1] == "DETECTION":
            if parts[2] == "DEADTIME":
                self.sendAnswer(s.strip() + ";10")
            elif parts[2] == "REACTIONTIME":
                self.sendAnswer(s.strip() + ";10")
            elif parts[2] == "NOTIFICATION":
                self.sendAnswer(s.strip() + ";1")
            else:
                logging.debug("Unknown detection request: {}".format(parts[2]))
                self.sendAnswer(s.strip() + ";ERROR")
        else:
            logging.debug("Unknown config category: {}".format(parts[1]))
            self.sendAnswer(s.strip() + ";ERROR")

    def GETFIRMWAREVERSION(self, s):
        """Report a fixed firmware version."""
        self.sendAnswer("GETFIRMWAREVERSION;1.0")

    def GETACTIVESTATUS(self, s):
        """Active-extension status is unsupported by this emulator."""
        self.sendAnswer("GETACTIVESTATUS;ERROR")

    def PASSINGS(self, s):
        """Report the number of stored passings (file number fixed at 1)."""
        self.sendAnswer("PASSINGS;{};1".format(len(self._allPassings)))

    def SETPUSHPASSINGS(self, s):
        """SETPUSHPASSINGS;<push>;<sendExisting>: toggle push notifications."""
        parts = s.split(";")
        if parts[1] == "1":
            self._notify = True
        else:
            # Fix: previously assigned to a non-existent ``self.notify``
            # attribute, so push mode could never be switched off.
            self._notify = False
        if parts[2] == "1":
            # TODO: protocol comment says all existing passings should be
            # pushed here; left unimplemented as in the original.
            pass
        self.sendAnswer(s)
if __name__ == '__main__':
foo = RRConnection()
foo.start()
while True:
try:
logging.debug("You can enter new passings in the format <bib> (current time will be taken")
newEntry = int(input())
newTime = datetime.now()
foo.addPassing(newEntry, newTime.strftime("%Y-%m-%d"), newTime.strftime("%H:%M:%S.%f"))
except KeyboardInterrupt:
logging.debug("Exiting...")
foo.stop()
sys.exit(1)
|
normal
|
{
"blob_id": "2ccc5e01a3b47a77abcb32160dee74a6a74fcfbb",
"index": 5808,
"step-1": "<mask token>\n\n\nclass RRConnection:\n <mask token>\n <mask token>\n\n def stop(self):\n self._isRunning = False\n self._listenerSocket.close()\n self._inSock.close()\n <mask token>\n <mask token>\n\n def sendAnswer(self, answer):\n if self._inSock:\n logging.debug('Sending: {}'.format(answer))\n fullAnswer = answer + '\\r\\n'\n try:\n self._inSock.send(fullAnswer.encode())\n except socket.error:\n logging.debug('Send error!')\n else:\n logging.debug('Not connected!')\n <mask token>\n <mask token>\n\n def SETPROTOCOL(self, str):\n logging.debug('Set protocol: {}'.format(str))\n self.sendAnswer('SETPROTOCOL;2.0')\n <mask token>\n <mask token>\n <mask token>\n\n def GETACTIVESTATUS(self, s):\n self.sendAnswer('GETACTIVESTATUS;ERROR')\n\n def PASSINGS(self, s):\n self.sendAnswer('PASSINGS;{};1'.format(len(self._allPassings)))\n\n def SETPUSHPASSINGS(self, s):\n parts = s.split(';')\n if parts[1] == '1':\n self._notify = True\n else:\n self.notify = False\n if parts[2] == '1':\n pass\n self.sendAnswer(s)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RRConnection:\n\n def __init__(self):\n self._listenerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM\n )\n self._inSock = None\n self._inThread = Thread(target=self.inLoop)\n self._isRunning = True\n self._notify = False\n self._allPassings = []\n <mask token>\n\n def stop(self):\n self._isRunning = False\n self._listenerSocket.close()\n self._inSock.close()\n\n def inLoop(self):\n self._listenerSocket.bind(('', 3601))\n self._listenerSocket.listen(1)\n while self._isRunning:\n logging.debug('Starting listener socket on port 3601')\n self._inSock, addr = self._listenerSocket.accept()\n try:\n logging.debug('Got connection from {}'.format(addr))\n keepReceiving = True\n while keepReceiving:\n received = self._inSock.recv(1024 * 1024)\n if len(received) > 0:\n self.parseCommand(received.decode())\n else:\n keepReceiving = False\n except ConnectionResetError:\n logging.debug('Connection closed, retry')\n <mask token>\n\n def sendAnswer(self, answer):\n if self._inSock:\n logging.debug('Sending: {}'.format(answer))\n fullAnswer = answer + '\\r\\n'\n try:\n self._inSock.send(fullAnswer.encode())\n except socket.error:\n logging.debug('Send error!')\n else:\n logging.debug('Not connected!')\n\n def addPassing(self, Bib, Date, Time):\n PassingNo = len(self._allPassings) + 1\n EventID = '143722'\n Hits = '1'\n MaxRSSI = '31'\n InternalData = ''\n IsActive = '0'\n Channel = '1'\n LoopID = ''\n LoopOnly = ''\n WakeupCounter = ''\n Battery = ''\n Temperature = ''\n InternalActiveData = ''\n BoxName = 'SwimBox'\n FileNumber = '1'\n MaxRSSIAntenna = '1'\n BoxId = '1'\n entry = ('{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{}'\n .format(PassingNo, Bib, Date, Time, EventID, Hits, MaxRSSI,\n InternalData, IsActive, Channel, LoopID, LoopOnly,\n WakeupCounter, Battery, Temperature, InternalActiveData,\n BoxName, FileNumber, MaxRSSIAntenna, BoxId))\n self._allPassings.append(entry)\n if self._notify:\n 
self.sendAnswer('#P;{}'.format(entry))\n\n def sendPassings(self, number, count):\n if number + count - 1 > len(self._allPassings):\n self.sendAnswer('ONLY {}'.format(len(self._allPassings)))\n else:\n for i in range(number - 1, number + count - 1):\n self.sendAnswer(self._allPassings[i])\n\n def SETPROTOCOL(self, str):\n logging.debug('Set protocol: {}'.format(str))\n self.sendAnswer('SETPROTOCOL;2.0')\n\n def GETSTATUS(self, str):\n logging.debug('Get Status: {}'.format(str))\n Date = datetime.now().strftime('%Y-%m-%d')\n Time = datetime.now().strftime('%H:%M:%S.%f')\n HasPower = '0'\n Antennas = '10000000'\n IsInOperationMode = '1'\n FileNumber = '1'\n GPSHasFix = '0'\n Latitude = '0.0'\n Longitude = '0.0'\n ReaderIsHealthy = '1'\n BatteryCharge = '100'\n BoardTemperature = '20'\n ReaderTemperature = '20'\n UHFFrequency = '0'\n ActiveExtConnected = '0'\n Channel = ''\n LoopID = ''\n LoopPower = ''\n LoopConnected = ''\n LoopUnderPower = ''\n TimeIsRunning = '1'\n TimeSource = '0'\n ScheduledStandbyEnabled = '0'\n IsInStandby = '0'\n ErrorFlags = '0'\n self.sendAnswer(\n 'GETSTATUS;{};{};{};{};{};{};{};{},{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{}'\n .format(Date, Time, HasPower, Antennas, IsInOperationMode,\n FileNumber, GPSHasFix, Latitude, Longitude, ReaderIsHealthy,\n BatteryCharge, BoardTemperature, ReaderTemperature,\n UHFFrequency, ActiveExtConnected, Channel, LoopID, LoopPower,\n LoopConnected, LoopUnderPower, TimeIsRunning, TimeSource,\n ScheduledStandbyEnabled, IsInStandby, ErrorFlags))\n <mask token>\n <mask token>\n\n def GETACTIVESTATUS(self, s):\n self.sendAnswer('GETACTIVESTATUS;ERROR')\n\n def PASSINGS(self, s):\n self.sendAnswer('PASSINGS;{};1'.format(len(self._allPassings)))\n\n def SETPUSHPASSINGS(self, s):\n parts = s.split(';')\n if parts[1] == '1':\n self._notify = True\n else:\n self.notify = False\n if parts[2] == '1':\n pass\n self.sendAnswer(s)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RRConnection:\n\n def __init__(self):\n self._listenerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM\n )\n self._inSock = None\n self._inThread = Thread(target=self.inLoop)\n self._isRunning = True\n self._notify = False\n self._allPassings = []\n <mask token>\n\n def stop(self):\n self._isRunning = False\n self._listenerSocket.close()\n self._inSock.close()\n\n def inLoop(self):\n self._listenerSocket.bind(('', 3601))\n self._listenerSocket.listen(1)\n while self._isRunning:\n logging.debug('Starting listener socket on port 3601')\n self._inSock, addr = self._listenerSocket.accept()\n try:\n logging.debug('Got connection from {}'.format(addr))\n keepReceiving = True\n while keepReceiving:\n received = self._inSock.recv(1024 * 1024)\n if len(received) > 0:\n self.parseCommand(received.decode())\n else:\n keepReceiving = False\n except ConnectionResetError:\n logging.debug('Connection closed, retry')\n <mask token>\n\n def sendAnswer(self, answer):\n if self._inSock:\n logging.debug('Sending: {}'.format(answer))\n fullAnswer = answer + '\\r\\n'\n try:\n self._inSock.send(fullAnswer.encode())\n except socket.error:\n logging.debug('Send error!')\n else:\n logging.debug('Not connected!')\n\n def addPassing(self, Bib, Date, Time):\n PassingNo = len(self._allPassings) + 1\n EventID = '143722'\n Hits = '1'\n MaxRSSI = '31'\n InternalData = ''\n IsActive = '0'\n Channel = '1'\n LoopID = ''\n LoopOnly = ''\n WakeupCounter = ''\n Battery = ''\n Temperature = ''\n InternalActiveData = ''\n BoxName = 'SwimBox'\n FileNumber = '1'\n MaxRSSIAntenna = '1'\n BoxId = '1'\n entry = ('{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{}'\n .format(PassingNo, Bib, Date, Time, EventID, Hits, MaxRSSI,\n InternalData, IsActive, Channel, LoopID, LoopOnly,\n WakeupCounter, Battery, Temperature, InternalActiveData,\n BoxName, FileNumber, MaxRSSIAntenna, BoxId))\n self._allPassings.append(entry)\n if self._notify:\n 
self.sendAnswer('#P;{}'.format(entry))\n\n def sendPassings(self, number, count):\n if number + count - 1 > len(self._allPassings):\n self.sendAnswer('ONLY {}'.format(len(self._allPassings)))\n else:\n for i in range(number - 1, number + count - 1):\n self.sendAnswer(self._allPassings[i])\n\n def SETPROTOCOL(self, str):\n logging.debug('Set protocol: {}'.format(str))\n self.sendAnswer('SETPROTOCOL;2.0')\n\n def GETSTATUS(self, str):\n logging.debug('Get Status: {}'.format(str))\n Date = datetime.now().strftime('%Y-%m-%d')\n Time = datetime.now().strftime('%H:%M:%S.%f')\n HasPower = '0'\n Antennas = '10000000'\n IsInOperationMode = '1'\n FileNumber = '1'\n GPSHasFix = '0'\n Latitude = '0.0'\n Longitude = '0.0'\n ReaderIsHealthy = '1'\n BatteryCharge = '100'\n BoardTemperature = '20'\n ReaderTemperature = '20'\n UHFFrequency = '0'\n ActiveExtConnected = '0'\n Channel = ''\n LoopID = ''\n LoopPower = ''\n LoopConnected = ''\n LoopUnderPower = ''\n TimeIsRunning = '1'\n TimeSource = '0'\n ScheduledStandbyEnabled = '0'\n IsInStandby = '0'\n ErrorFlags = '0'\n self.sendAnswer(\n 'GETSTATUS;{};{};{};{};{};{};{};{},{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{}'\n .format(Date, Time, HasPower, Antennas, IsInOperationMode,\n FileNumber, GPSHasFix, Latitude, Longitude, ReaderIsHealthy,\n BatteryCharge, BoardTemperature, ReaderTemperature,\n UHFFrequency, ActiveExtConnected, Channel, LoopID, LoopPower,\n LoopConnected, LoopUnderPower, TimeIsRunning, TimeSource,\n ScheduledStandbyEnabled, IsInStandby, ErrorFlags))\n <mask token>\n\n def GETFIRMWAREVERSION(self, s):\n self.sendAnswer('GETFIRMWAREVERSION;1.0')\n\n def GETACTIVESTATUS(self, s):\n self.sendAnswer('GETACTIVESTATUS;ERROR')\n\n def PASSINGS(self, s):\n self.sendAnswer('PASSINGS;{};1'.format(len(self._allPassings)))\n\n def SETPUSHPASSINGS(self, s):\n parts = s.split(';')\n if parts[1] == '1':\n self._notify = True\n else:\n self.notify = False\n if parts[2] == '1':\n pass\n self.sendAnswer(s)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass RRConnection:\n\n def __init__(self):\n self._listenerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM\n )\n self._inSock = None\n self._inThread = Thread(target=self.inLoop)\n self._isRunning = True\n self._notify = False\n self._allPassings = []\n\n def start(self):\n logging.debug('Starting thread for in-loop')\n self._inThread.start()\n\n def stop(self):\n self._isRunning = False\n self._listenerSocket.close()\n self._inSock.close()\n\n def inLoop(self):\n self._listenerSocket.bind(('', 3601))\n self._listenerSocket.listen(1)\n while self._isRunning:\n logging.debug('Starting listener socket on port 3601')\n self._inSock, addr = self._listenerSocket.accept()\n try:\n logging.debug('Got connection from {}'.format(addr))\n keepReceiving = True\n while keepReceiving:\n received = self._inSock.recv(1024 * 1024)\n if len(received) > 0:\n self.parseCommand(received.decode())\n else:\n keepReceiving = False\n except ConnectionResetError:\n logging.debug('Connection closed, retry')\n\n def parseCommand(self, cmd):\n allCmd = cmd.strip().split('\\r\\n')\n for oneCmd in allCmd:\n if oneCmd.strip() != '':\n logging.debug('Parsing command {}'.format(oneCmd))\n f = oneCmd.split(';')\n if hasattr(self, f[0].strip()):\n getattr(self, f[0].strip())(oneCmd)\n elif ':' in oneCmd:\n numbers = oneCmd.split(':')\n self.sendPassings(int(numbers[0]), int(numbers[1]))\n elif oneCmd.isdigit():\n self.sendPassings(int(oneCmd), 1)\n else:\n logging.debug('Function {} not known: {}'.format(f[0], cmd)\n )\n\n def sendAnswer(self, answer):\n if self._inSock:\n logging.debug('Sending: {}'.format(answer))\n fullAnswer = answer + '\\r\\n'\n try:\n self._inSock.send(fullAnswer.encode())\n except socket.error:\n logging.debug('Send error!')\n else:\n logging.debug('Not connected!')\n\n def addPassing(self, Bib, Date, Time):\n PassingNo = len(self._allPassings) + 1\n EventID = '143722'\n Hits = '1'\n MaxRSSI = '31'\n InternalData = ''\n IsActive = '0'\n 
Channel = '1'\n LoopID = ''\n LoopOnly = ''\n WakeupCounter = ''\n Battery = ''\n Temperature = ''\n InternalActiveData = ''\n BoxName = 'SwimBox'\n FileNumber = '1'\n MaxRSSIAntenna = '1'\n BoxId = '1'\n entry = ('{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{}'\n .format(PassingNo, Bib, Date, Time, EventID, Hits, MaxRSSI,\n InternalData, IsActive, Channel, LoopID, LoopOnly,\n WakeupCounter, Battery, Temperature, InternalActiveData,\n BoxName, FileNumber, MaxRSSIAntenna, BoxId))\n self._allPassings.append(entry)\n if self._notify:\n self.sendAnswer('#P;{}'.format(entry))\n\n def sendPassings(self, number, count):\n if number + count - 1 > len(self._allPassings):\n self.sendAnswer('ONLY {}'.format(len(self._allPassings)))\n else:\n for i in range(number - 1, number + count - 1):\n self.sendAnswer(self._allPassings[i])\n\n def SETPROTOCOL(self, str):\n logging.debug('Set protocol: {}'.format(str))\n self.sendAnswer('SETPROTOCOL;2.0')\n\n def GETSTATUS(self, str):\n logging.debug('Get Status: {}'.format(str))\n Date = datetime.now().strftime('%Y-%m-%d')\n Time = datetime.now().strftime('%H:%M:%S.%f')\n HasPower = '0'\n Antennas = '10000000'\n IsInOperationMode = '1'\n FileNumber = '1'\n GPSHasFix = '0'\n Latitude = '0.0'\n Longitude = '0.0'\n ReaderIsHealthy = '1'\n BatteryCharge = '100'\n BoardTemperature = '20'\n ReaderTemperature = '20'\n UHFFrequency = '0'\n ActiveExtConnected = '0'\n Channel = ''\n LoopID = ''\n LoopPower = ''\n LoopConnected = ''\n LoopUnderPower = ''\n TimeIsRunning = '1'\n TimeSource = '0'\n ScheduledStandbyEnabled = '0'\n IsInStandby = '0'\n ErrorFlags = '0'\n self.sendAnswer(\n 'GETSTATUS;{};{};{};{};{};{};{};{},{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{}'\n .format(Date, Time, HasPower, Antennas, IsInOperationMode,\n FileNumber, GPSHasFix, Latitude, Longitude, ReaderIsHealthy,\n BatteryCharge, BoardTemperature, ReaderTemperature,\n UHFFrequency, ActiveExtConnected, Channel, LoopID, LoopPower,\n LoopConnected, 
LoopUnderPower, TimeIsRunning, TimeSource,\n ScheduledStandbyEnabled, IsInStandby, ErrorFlags))\n\n def GETCONFIG(self, s):\n parts = s.split(';')\n if parts[1] == 'GENERAL':\n if parts[2] == 'BOXNAME':\n self.sendAnswer(s.strip() + ';SwimBox;1')\n elif parts[2] == 'TIMEZONE':\n self.sendAnswer(s.strip() + ';Europe/Amsterdam')\n else:\n logging.debug('Unknown general request: {}'.format(parts[2]))\n self.sendAnswer(s.strip() + ';ERROR')\n elif parts[1] == 'DETECTION':\n if parts[2] == 'DEADTIME':\n self.sendAnswer(s.strip() + ';10')\n elif parts[2] == 'REACTIONTIME':\n self.sendAnswer(s.strip() + ';10')\n elif parts[2] == 'NOTIFICATION':\n self.sendAnswer(s.strip() + ';1')\n else:\n logging.debug('Unknown detection request: {}'.format(parts[2]))\n self.sendAnswer(s.strip() + ';ERROR')\n else:\n logging.debug('Unknown config category: {}'.format(parts[1]))\n self.sendAnswer(s.strip() + ';ERROR')\n\n def GETFIRMWAREVERSION(self, s):\n self.sendAnswer('GETFIRMWAREVERSION;1.0')\n\n def GETACTIVESTATUS(self, s):\n self.sendAnswer('GETACTIVESTATUS;ERROR')\n\n def PASSINGS(self, s):\n self.sendAnswer('PASSINGS;{};1'.format(len(self._allPassings)))\n\n def SETPUSHPASSINGS(self, s):\n parts = s.split(';')\n if parts[1] == '1':\n self._notify = True\n else:\n self.notify = False\n if parts[2] == '1':\n pass\n self.sendAnswer(s)\n\n\n<mask token>\n",
"step-5": "import socket\nimport sys\nfrom datetime import datetime\nfrom threading import Thread\nimport logging\n\n\nclass RRConnection():\n def __init__(self):\n self._listenerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._inSock = None\n self._inThread = Thread(target=self.inLoop)\n self._isRunning = True\n self._notify = False\n self._allPassings = []\n\n def start(self):\n logging.debug(\"Starting thread for in-loop\")\n self._inThread.start()\n\n def stop(self):\n self._isRunning = False\n self._listenerSocket.close()\n self._inSock.close()\n\n def inLoop(self):\n self._listenerSocket.bind(('', 3601))\n self._listenerSocket.listen(1)\n while self._isRunning:\n logging.debug(\"Starting listener socket on port 3601\")\n self._inSock, addr = self._listenerSocket.accept()\n try:\n logging.debug(\"Got connection from {}\".format(addr))\n keepReceiving = True\n while keepReceiving:\n received = self._inSock.recv(1024 * 1024)\n if len(received) > 0:\n self.parseCommand(received.decode())\n else:\n keepReceiving = False\n except ConnectionResetError:\n logging.debug (\"Connection closed, retry\")\n\n def parseCommand(self, cmd):\n allCmd = cmd.strip().split(\"\\r\\n\")\n for oneCmd in allCmd:\n if oneCmd.strip() != \"\":\n logging.debug(\"Parsing command {}\".format(oneCmd))\n f = oneCmd.split(';')\n if hasattr(self, f[0].strip()):\n getattr(self, f[0].strip())(oneCmd)\n elif \":\" in oneCmd:\n numbers = oneCmd.split(':')\n self.sendPassings(int(numbers[0]), int(numbers[1]))\n elif oneCmd.isdigit():\n self.sendPassings(int(oneCmd), 1)\n else:\n logging.debug(\"Function {} not known: {}\".format(f[0],cmd))\n\n def sendAnswer(self, answer):\n if self._inSock:\n logging.debug(\"Sending: {}\".format(answer))\n fullAnswer = answer + \"\\r\\n\"\n try:\n self._inSock.send(fullAnswer.encode())\n except socket.error:\n logging.debug(\"Send error!\")\n else:\n logging.debug(\"Not connected!\")\n\n def addPassing(self, Bib, Date, Time):\n PassingNo = 
len(self._allPassings) + 1\n # Bib is param\n # Date\n # Time\n EventID = \"143722\"\n Hits = \"1\"\n MaxRSSI = \"31\"\n InternalData = \"\"\n IsActive = \"0\"\n Channel = \"1\"\n LoopID = \"\"\n LoopOnly = \"\"\n WakeupCounter = \"\"\n Battery = \"\"\n Temperature = \"\"\n InternalActiveData = \"\"\n BoxName = \"SwimBox\"\n FileNumber = \"1\"\n MaxRSSIAntenna = \"1\"\n BoxId = \"1\"\n # entry = f\"{PassingNo};{Bib};{Date};{Time};{EventID};{Hits};{MaxRSSI};{InternalData};{IsActive};{Channel};{LoopID};{LoopOnly};{WakeupCounter};{Battery};{Temperature};{InternalActiveData};{BoxName};{FileNumber};{MaxRSSIAntenna};{BoxId}\"\n entry = \"{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{}\".format(PassingNo, Bib, Date, Time,\n EventID, Hits, MaxRSSI,\n InternalData, IsActive, Channel,\n LoopID, LoopOnly, WakeupCounter,\n Battery, Temperature,\n InternalActiveData, BoxName,\n FileNumber, MaxRSSIAntenna, BoxId)\n self._allPassings.append(entry)\n if self._notify:\n self.sendAnswer(\"#P;{}\".format(entry))\n\n def sendPassings(self, number, count):\n if number+count-1>len(self._allPassings):\n self.sendAnswer(\"ONLY {}\".format(len(self._allPassings)))\n else:\n for i in range(number-1, number + count -1):\n self.sendAnswer(self._allPassings[i])\n\n def SETPROTOCOL(self, str):\n logging.debug(\"Set protocol: {}\".format(str))\n self.sendAnswer(\"SETPROTOCOL;2.0\")\n\n def GETSTATUS(self, str):\n logging.debug(\"Get Status: {}\".format(str))\n # GETSTATUS;<Date>;<Time>;<HasPower>;<Antennas>;<IsInOperationMode>;<FileNumber>;<GPSHasFix>;<Latitude>,<Longitude>;<ReaderIsHealthy>;<BatteryCharge>;<BoardTemperature>;<ReaderTemperature>;<UHFFrequency>;<ActiveExtConnected>;[<Channel>];[<LoopID>];[<LoopPower>];[<LoopConnected>];[<LoopUnderPower>];<TimeIsRunning>;<TimeSource>;<ScheduledStandbyEnabled>;<IsInStandby>\n # GETSTATUS;0000-00-00;00:02:39.942;1;11111111;1;50;1;49.721,8.254939;1;0;;;;;;;1;0<CrLf>\n Date = datetime.now().strftime(\"%Y-%m-%d\")\n Time = 
datetime.now().strftime(\"%H:%M:%S.%f\")\n HasPower = \"0\"\n Antennas = \"10000000\"\n IsInOperationMode = \"1\"\n FileNumber = \"1\"\n GPSHasFix = \"0\"\n Latitude = \"0.0\"\n Longitude = \"0.0\"\n ReaderIsHealthy = \"1\"\n BatteryCharge = \"100\"\n BoardTemperature = \"20\"\n ReaderTemperature = \"20\"\n UHFFrequency = \"0\"\n ActiveExtConnected = \"0\"\n Channel = \"\"\n LoopID = \"\"\n LoopPower = \"\"\n LoopConnected = \"\"\n LoopUnderPower = \"\"\n TimeIsRunning = \"1\"\n TimeSource = \"0\"\n ScheduledStandbyEnabled = \"0\"\n IsInStandby = \"0\"\n ErrorFlags = \"0\"\n # self.sendAnswer(\n # f\"GETSTATUS;{Date};{Time};{HasPower};{Antennas};{IsInOperationMode};{FileNumber};{GPSHasFix};{Latitude},{Longitude};{ReaderIsHealthy};{BatteryCharge};{BoardTemperature};{ReaderTemperature};{UHFFrequency};{ActiveExtConnected};{Channel};{LoopID};{LoopPower};{LoopConnected};{LoopUnderPower};{TimeIsRunning};{TimeSource};{ScheduledStandbyEnabled};{IsInStandby};{ErrorFlags}\")\n self.sendAnswer(\n \"GETSTATUS;{};{};{};{};{};{};{};{},{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{}\".format(Date, Time,\n HasPower,\n Antennas,\n IsInOperationMode,\n FileNumber,\n GPSHasFix,\n Latitude,\n Longitude,\n ReaderIsHealthy,\n BatteryCharge,\n BoardTemperature,\n ReaderTemperature,\n UHFFrequency,\n ActiveExtConnected,\n Channel,\n LoopID,\n LoopPower,\n LoopConnected,\n LoopUnderPower,\n TimeIsRunning,\n TimeSource,\n ScheduledStandbyEnabled,\n IsInStandby,\n ErrorFlags))\n\n def GETCONFIG(self, s):\n parts = s.split(\";\")\n if parts[1] == \"GENERAL\":\n if parts[2] == \"BOXNAME\":\n self.sendAnswer(s.strip() + \";SwimBox;1\")\n elif parts[2] == \"TIMEZONE\":\n self.sendAnswer(s.strip() + \";Europe/Amsterdam\")\n else:\n logging.debug(\"Unknown general request: {}\".format(parts[2]))\n self.sendAnswer(s.strip() + \";ERROR\")\n elif parts[1] == \"DETECTION\":\n if parts[2] == \"DEADTIME\":\n self.sendAnswer(s.strip() + \";10\")\n elif parts[2] == \"REACTIONTIME\":\n 
self.sendAnswer(s.strip() + \";10\")\n elif parts[2] == \"NOTIFICATION\":\n self.sendAnswer(s.strip() + \";1\")\n else:\n logging.debug(\"Unknown detection request: {}\".format(parts[2]))\n self.sendAnswer(s.strip() + \";ERROR\")\n else:\n logging.debug(\"Unknown config category: {}\".format(parts[1]))\n self.sendAnswer(s.strip() + \";ERROR\")\n\n def GETFIRMWAREVERSION(self, s):\n self.sendAnswer(\"GETFIRMWAREVERSION;1.0\")\n\n def GETACTIVESTATUS(self, s):\n self.sendAnswer(\"GETACTIVESTATUS;ERROR\")\n\n def PASSINGS(self, s):\n self.sendAnswer(\"PASSINGS;{};1\".format(len(self._allPassings)))\n\n def SETPUSHPASSINGS(self, s):\n parts = s.split(\";\")\n if parts[1] == \"1\":\n self._notify = True\n else:\n self.notify = False\n if parts[2] == \"1\":\n pass\n # shall send all existing here\n self.sendAnswer(s)\n\n\nif __name__ == '__main__':\n foo = RRConnection()\n foo.start()\n while True:\n try:\n logging.debug(\"You can enter new passings in the format <bib> (current time will be taken\")\n newEntry = int(input())\n newTime = datetime.now()\n foo.addPassing(newEntry, newTime.strftime(\"%Y-%m-%d\"), newTime.strftime(\"%H:%M:%S.%f\"))\n except KeyboardInterrupt:\n logging.debug(\"Exiting...\")\n foo.stop()\n sys.exit(1)\n",
"step-ids": [
7,
12,
13,
16,
19
]
}
|
[
7,
12,
13,
16,
19
] |
# -*- coding: utf-8 -*-
import abc
import datetime
import importlib
import inspect
import os
import re
import six
from .library import HalLibrary
@six.add_metaclass(abc.ABCMeta)
class Hal():
def __init__(self, configpath):
self.configpath = configpath
# Find libraries inside the lib directory
dir_path = os.path.join(os.path.dirname(__file__), "libraries")
lib_files = [f for f in os.listdir(dir_path) if
os.path.isfile(os.path.join(dir_path, f)) and
f.lower().endswith(".py")
]
self.responses = []
self.libraries = []
for f in lib_files:
# Try to load the module
try:
module_name = "hal.libraries." + f[:-3]
module = importlib.import_module(module_name)
for name, obj in inspect.getmembers(module):
# Find classes that inherit from HalLibrary
if inspect.isclass(obj) and issubclass(obj, HalLibrary) and \
name != "HalLibrary" and not inspect.isabstract(obj):
self.libraries.append(obj)
except:
self.add_response("Error loading library {}".format(f))
raise
def add_response(self, text):
self.responses.append(text)
def say_all(self):
response = "\n".join(self.responses)
return response
@abc.abstractmethod
def display_help(self):
""" Present some information to the user """
pass
def greet(self):
hour = datetime.datetime.now().hour
greeting = "Good Evening"
if hour < 12:
greeting = 'Good morning'
elif 12 <= hour < 18:
greeting = 'Good afternoon'
self.add_response("{}. What can I help you with?".format(greeting))
def process(self, command):
"""
Process the command and get response by querying each plugin if required.
"""
self.responses = []
if(len(command) == 0):
self.greet()
return self.say_all()
# prepare the command
command = command.strip()
# Some hard coded patterns: If first word is help, activate help
# moudule
help_regex = re.compile("help\s+([^\s]+)")
help_match = help_regex.match(command)
if help_match:
keyword = help_match.group(1).lower()
# Try to find libraries with the keyword and print their help
for lib in self.libraries:
if keyword in lib.keywords:
# Print the help text
help_content = lib.help()
self.display_help(help_content)
return
matched = False
for lib in self.libraries:
lib_obj = lib(command)
# try to match the command with the library
lib_obj.process_input()
if lib_obj.status == HalLibrary.SUCCESS or lib_obj.status == HalLibrary.INCOMPLETE:
matched = True
lib_obj.process()
resp = lib_obj.get_response()
for r in resp:
self.add_response(r)
elif lib_obj.status == HalLibrary.ERROR:
matched = True
self.add_response("ERROR: " + lib_obj.get_error())
else:
# Failure to match
pass
if not matched:
self.add_response("I don't understand what you're saying.")
return self.say_all()
|
normal
|
{
"blob_id": "81dec10686b521dc9400a209caabc1601efd2a88",
"index": 540,
"step-1": "<mask token>\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass Hal:\n\n def __init__(self, configpath):\n self.configpath = configpath\n dir_path = os.path.join(os.path.dirname(__file__), 'libraries')\n lib_files = [f for f in os.listdir(dir_path) if os.path.isfile(os.\n path.join(dir_path, f)) and f.lower().endswith('.py')]\n self.responses = []\n self.libraries = []\n for f in lib_files:\n try:\n module_name = 'hal.libraries.' + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n if inspect.isclass(obj) and issubclass(obj, HalLibrary\n ) and name != 'HalLibrary' and not inspect.isabstract(\n obj):\n self.libraries.append(obj)\n except:\n self.add_response('Error loading library {}'.format(f))\n raise\n <mask token>\n <mask token>\n\n @abc.abstractmethod\n def display_help(self):\n \"\"\" Present some information to the user \"\"\"\n pass\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass Hal:\n\n def __init__(self, configpath):\n self.configpath = configpath\n dir_path = os.path.join(os.path.dirname(__file__), 'libraries')\n lib_files = [f for f in os.listdir(dir_path) if os.path.isfile(os.\n path.join(dir_path, f)) and f.lower().endswith('.py')]\n self.responses = []\n self.libraries = []\n for f in lib_files:\n try:\n module_name = 'hal.libraries.' + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n if inspect.isclass(obj) and issubclass(obj, HalLibrary\n ) and name != 'HalLibrary' and not inspect.isabstract(\n obj):\n self.libraries.append(obj)\n except:\n self.add_response('Error loading library {}'.format(f))\n raise\n <mask token>\n\n def say_all(self):\n response = '\\n'.join(self.responses)\n return response\n\n @abc.abstractmethod\n def display_help(self):\n \"\"\" Present some information to the user \"\"\"\n pass\n\n def greet(self):\n hour = datetime.datetime.now().hour\n greeting = 'Good Evening'\n if hour < 12:\n greeting = 'Good morning'\n elif 12 <= hour < 18:\n greeting = 'Good afternoon'\n self.add_response('{}. What can I help you with?'.format(greeting))\n <mask token>\n",
"step-3": "<mask token>\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass Hal:\n\n def __init__(self, configpath):\n self.configpath = configpath\n dir_path = os.path.join(os.path.dirname(__file__), 'libraries')\n lib_files = [f for f in os.listdir(dir_path) if os.path.isfile(os.\n path.join(dir_path, f)) and f.lower().endswith('.py')]\n self.responses = []\n self.libraries = []\n for f in lib_files:\n try:\n module_name = 'hal.libraries.' + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n if inspect.isclass(obj) and issubclass(obj, HalLibrary\n ) and name != 'HalLibrary' and not inspect.isabstract(\n obj):\n self.libraries.append(obj)\n except:\n self.add_response('Error loading library {}'.format(f))\n raise\n\n def add_response(self, text):\n self.responses.append(text)\n\n def say_all(self):\n response = '\\n'.join(self.responses)\n return response\n\n @abc.abstractmethod\n def display_help(self):\n \"\"\" Present some information to the user \"\"\"\n pass\n\n def greet(self):\n hour = datetime.datetime.now().hour\n greeting = 'Good Evening'\n if hour < 12:\n greeting = 'Good morning'\n elif 12 <= hour < 18:\n greeting = 'Good afternoon'\n self.add_response('{}. What can I help you with?'.format(greeting))\n <mask token>\n",
"step-4": "import abc\nimport datetime\nimport importlib\nimport inspect\nimport os\nimport re\nimport six\nfrom .library import HalLibrary\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass Hal:\n\n def __init__(self, configpath):\n self.configpath = configpath\n dir_path = os.path.join(os.path.dirname(__file__), 'libraries')\n lib_files = [f for f in os.listdir(dir_path) if os.path.isfile(os.\n path.join(dir_path, f)) and f.lower().endswith('.py')]\n self.responses = []\n self.libraries = []\n for f in lib_files:\n try:\n module_name = 'hal.libraries.' + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n if inspect.isclass(obj) and issubclass(obj, HalLibrary\n ) and name != 'HalLibrary' and not inspect.isabstract(\n obj):\n self.libraries.append(obj)\n except:\n self.add_response('Error loading library {}'.format(f))\n raise\n\n def add_response(self, text):\n self.responses.append(text)\n\n def say_all(self):\n response = '\\n'.join(self.responses)\n return response\n\n @abc.abstractmethod\n def display_help(self):\n \"\"\" Present some information to the user \"\"\"\n pass\n\n def greet(self):\n hour = datetime.datetime.now().hour\n greeting = 'Good Evening'\n if hour < 12:\n greeting = 'Good morning'\n elif 12 <= hour < 18:\n greeting = 'Good afternoon'\n self.add_response('{}. 
What can I help you with?'.format(greeting))\n\n def process(self, command):\n \"\"\"\n Process the command and get response by querying each plugin if required.\n \"\"\"\n self.responses = []\n if len(command) == 0:\n self.greet()\n return self.say_all()\n command = command.strip()\n help_regex = re.compile('help\\\\s+([^\\\\s]+)')\n help_match = help_regex.match(command)\n if help_match:\n keyword = help_match.group(1).lower()\n for lib in self.libraries:\n if keyword in lib.keywords:\n help_content = lib.help()\n self.display_help(help_content)\n return\n matched = False\n for lib in self.libraries:\n lib_obj = lib(command)\n lib_obj.process_input()\n if (lib_obj.status == HalLibrary.SUCCESS or lib_obj.status ==\n HalLibrary.INCOMPLETE):\n matched = True\n lib_obj.process()\n resp = lib_obj.get_response()\n for r in resp:\n self.add_response(r)\n elif lib_obj.status == HalLibrary.ERROR:\n matched = True\n self.add_response('ERROR: ' + lib_obj.get_error())\n else:\n pass\n if not matched:\n self.add_response(\"I don't understand what you're saying.\")\n return self.say_all()\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport abc\nimport datetime\nimport importlib\nimport inspect\nimport os\nimport re\nimport six\n\nfrom .library import HalLibrary\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass Hal():\n\n def __init__(self, configpath):\n self.configpath = configpath\n # Find libraries inside the lib directory\n\n dir_path = os.path.join(os.path.dirname(__file__), \"libraries\")\n lib_files = [f for f in os.listdir(dir_path) if\n os.path.isfile(os.path.join(dir_path, f)) and\n f.lower().endswith(\".py\")\n ]\n\n self.responses = []\n self.libraries = []\n for f in lib_files:\n # Try to load the module\n try:\n module_name = \"hal.libraries.\" + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n # Find classes that inherit from HalLibrary\n if inspect.isclass(obj) and issubclass(obj, HalLibrary) and \\\n name != \"HalLibrary\" and not inspect.isabstract(obj):\n self.libraries.append(obj)\n except:\n self.add_response(\"Error loading library {}\".format(f))\n raise\n\n def add_response(self, text):\n self.responses.append(text)\n\n def say_all(self):\n response = \"\\n\".join(self.responses)\n return response\n\n @abc.abstractmethod\n def display_help(self):\n \"\"\" Present some information to the user \"\"\"\n pass\n\n\n def greet(self):\n hour = datetime.datetime.now().hour\n\n greeting = \"Good Evening\"\n if hour < 12:\n greeting = 'Good morning'\n elif 12 <= hour < 18:\n greeting = 'Good afternoon'\n\n self.add_response(\"{}. 
What can I help you with?\".format(greeting))\n\n def process(self, command):\n \"\"\"\n Process the command and get response by querying each plugin if required.\n \"\"\"\n self.responses = []\n if(len(command) == 0):\n self.greet()\n return self.say_all()\n\n # prepare the command\n command = command.strip()\n\n # Some hard coded patterns: If first word is help, activate help\n # moudule\n help_regex = re.compile(\"help\\s+([^\\s]+)\")\n help_match = help_regex.match(command)\n if help_match:\n keyword = help_match.group(1).lower()\n # Try to find libraries with the keyword and print their help\n\n for lib in self.libraries:\n if keyword in lib.keywords:\n # Print the help text\n help_content = lib.help()\n self.display_help(help_content)\n return\n\n matched = False\n\n for lib in self.libraries:\n\n lib_obj = lib(command)\n\n # try to match the command with the library\n lib_obj.process_input()\n\n if lib_obj.status == HalLibrary.SUCCESS or lib_obj.status == HalLibrary.INCOMPLETE:\n\n matched = True\n\n lib_obj.process()\n\n resp = lib_obj.get_response()\n\n for r in resp:\n self.add_response(r)\n\n elif lib_obj.status == HalLibrary.ERROR:\n matched = True\n self.add_response(\"ERROR: \" + lib_obj.get_error())\n else:\n # Failure to match\n pass\n\n if not matched:\n self.add_response(\"I don't understand what you're saying.\")\n\n return self.say_all()\n",
"step-ids": [
3,
5,
6,
8,
9
]
}
|
[
3,
5,
6,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_problem_7_1_8():
assert check_linalg()
assert abs(problem_7_1_8(5000) - 84.8) < 1
<|reserved_special_token_1|>
import pytest
from a3 import *
from test_utils import *
from numpy import allclose
def test_problem_7_1_8():
assert check_linalg()
assert abs(problem_7_1_8(5000) - 84.8) < 1
<|reserved_special_token_1|>
import pytest
from a3 import *
from test_utils import *
from numpy import allclose
def test_problem_7_1_8():
assert(check_linalg())
assert(abs(problem_7_1_8(5000)-84.8)<1)
|
flexible
|
{
"blob_id": "c7553cadb49c9c7e80a7800b9bff4d5f64796494",
"index": 7568,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_problem_7_1_8():\n assert check_linalg()\n assert abs(problem_7_1_8(5000) - 84.8) < 1\n",
"step-3": "import pytest\nfrom a3 import *\nfrom test_utils import *\nfrom numpy import allclose\n\n\ndef test_problem_7_1_8():\n assert check_linalg()\n assert abs(problem_7_1_8(5000) - 84.8) < 1\n",
"step-4": "import pytest\nfrom a3 import *\nfrom test_utils import *\nfrom numpy import allclose\n\ndef test_problem_7_1_8():\n assert(check_linalg())\n assert(abs(problem_7_1_8(5000)-84.8)<1)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sqlite3
conn = sqlite3.connect("blog.db")
c = conn.cursor()
q = "CREATE TABLE users(Username text, Password text, UserID integer)"
c.execute(q)
q = "CREATE TABLE blogs(Title text, Content text, BlogID integer, UserID integer)"
c.execute(q)
q = "CREATE TABLE comments(Content text, CommentID integer, BlogID integer, UserID integer)"
c.execute(q)
conn.commit()
|
normal
|
{
"blob_id": "8afaa69d3a20c5e39e6321869f25dbd9020a5b3a",
"index": 2460,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nc.execute(q)\n<mask token>\nc.execute(q)\n<mask token>\nc.execute(q)\nconn.commit()\n",
"step-3": "<mask token>\nconn = sqlite3.connect('blog.db')\nc = conn.cursor()\nq = 'CREATE TABLE users(Username text, Password text, UserID integer)'\nc.execute(q)\nq = (\n 'CREATE TABLE blogs(Title text, Content text, BlogID integer, UserID integer)'\n )\nc.execute(q)\nq = (\n 'CREATE TABLE comments(Content text, CommentID integer, BlogID integer, UserID integer)'\n )\nc.execute(q)\nconn.commit()\n",
"step-4": "import sqlite3\nconn = sqlite3.connect('blog.db')\nc = conn.cursor()\nq = 'CREATE TABLE users(Username text, Password text, UserID integer)'\nc.execute(q)\nq = (\n 'CREATE TABLE blogs(Title text, Content text, BlogID integer, UserID integer)'\n )\nc.execute(q)\nq = (\n 'CREATE TABLE comments(Content text, CommentID integer, BlogID integer, UserID integer)'\n )\nc.execute(q)\nconn.commit()\n",
"step-5": "import sqlite3\n\nconn = sqlite3.connect(\"blog.db\")\n\nc = conn.cursor()\n\nq = \"CREATE TABLE users(Username text, Password text, UserID integer)\"\nc.execute(q)\n\nq = \"CREATE TABLE blogs(Title text, Content text, BlogID integer, UserID integer)\"\nc.execute(q)\n\nq = \"CREATE TABLE comments(Content text, CommentID integer, BlogID integer, UserID integer)\"\nc.execute(q)\n\nconn.commit() \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class test_leapyear(unittest.TestCase):
<|reserved_special_token_0|>
def test_hundred(self):
self.assertEqual(LeapYear.leapyear(2100), False)
def test_fourhundred(self):
self.assertEqual(LeapYear.leapyear(2000), True)
def test_normal(self):
self.assertEqual(LeapYear.leapyear(2002), False)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class test_leapyear(unittest.TestCase):
def test_four(self):
self.assertEqual(LeapYear.leapyear(2012), True)
def test_hundred(self):
self.assertEqual(LeapYear.leapyear(2100), False)
def test_fourhundred(self):
self.assertEqual(LeapYear.leapyear(2000), True)
def test_normal(self):
self.assertEqual(LeapYear.leapyear(2002), False)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class test_leapyear(unittest.TestCase):
def test_four(self):
self.assertEqual(LeapYear.leapyear(2012), True)
def test_hundred(self):
self.assertEqual(LeapYear.leapyear(2100), False)
def test_fourhundred(self):
self.assertEqual(LeapYear.leapyear(2000), True)
def test_normal(self):
self.assertEqual(LeapYear.leapyear(2002), False)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
import LeapYear
class test_leapyear(unittest.TestCase):
def test_four(self):
self.assertEqual(LeapYear.leapyear(2012), True)
def test_hundred(self):
self.assertEqual(LeapYear.leapyear(2100), False)
def test_fourhundred(self):
self.assertEqual(LeapYear.leapyear(2000), True)
def test_normal(self):
self.assertEqual(LeapYear.leapyear(2002), False)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
# test_LeapYear.py
# By Alex Graalum
import unittest
import LeapYear
class test_leapyear(unittest.TestCase):
def test_four(self):
self.assertEqual(LeapYear.leapyear(2012), True)
def test_hundred(self):
self.assertEqual(LeapYear.leapyear(2100), False)
def test_fourhundred(self):
self.assertEqual(LeapYear.leapyear(2000), True)
def test_normal(self):
self.assertEqual(LeapYear.leapyear(2002), False)
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "29cae66fdca65020a82212e5eabbc61eb900e543",
"index": 7720,
"step-1": "<mask token>\n\n\nclass test_leapyear(unittest.TestCase):\n <mask token>\n\n def test_hundred(self):\n self.assertEqual(LeapYear.leapyear(2100), False)\n\n def test_fourhundred(self):\n self.assertEqual(LeapYear.leapyear(2000), True)\n\n def test_normal(self):\n self.assertEqual(LeapYear.leapyear(2002), False)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass test_leapyear(unittest.TestCase):\n\n def test_four(self):\n self.assertEqual(LeapYear.leapyear(2012), True)\n\n def test_hundred(self):\n self.assertEqual(LeapYear.leapyear(2100), False)\n\n def test_fourhundred(self):\n self.assertEqual(LeapYear.leapyear(2000), True)\n\n def test_normal(self):\n self.assertEqual(LeapYear.leapyear(2002), False)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass test_leapyear(unittest.TestCase):\n\n def test_four(self):\n self.assertEqual(LeapYear.leapyear(2012), True)\n\n def test_hundred(self):\n self.assertEqual(LeapYear.leapyear(2100), False)\n\n def test_fourhundred(self):\n self.assertEqual(LeapYear.leapyear(2000), True)\n\n def test_normal(self):\n self.assertEqual(LeapYear.leapyear(2002), False)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport LeapYear\n\n\nclass test_leapyear(unittest.TestCase):\n\n def test_four(self):\n self.assertEqual(LeapYear.leapyear(2012), True)\n\n def test_hundred(self):\n self.assertEqual(LeapYear.leapyear(2100), False)\n\n def test_fourhundred(self):\n self.assertEqual(LeapYear.leapyear(2000), True)\n\n def test_normal(self):\n self.assertEqual(LeapYear.leapyear(2002), False)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "# test_LeapYear.py\n# By Alex Graalum\nimport unittest\nimport LeapYear\n\nclass test_leapyear(unittest.TestCase):\n def test_four(self):\n self.assertEqual(LeapYear.leapyear(2012), True)\n def test_hundred(self):\n self.assertEqual(LeapYear.leapyear(2100), False)\n def test_fourhundred(self):\n self.assertEqual(LeapYear.leapyear(2000), True)\n def test_normal(self):\n self.assertEqual(LeapYear.leapyear(2002), False)\n \nif __name__ == '__main__':\n unittest.main()\n \n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def checkSubarraySum(self, nums, k):
if not nums or len(nums) == 1:
return False
sum_array = [0] * (len(nums) + 1)
for i, num in enumerate(nums):
sum_array[i + 1] = sum_array[i] + num
if k == 0:
if sum_array[-1] == 0:
return True
else:
return False
for i in range(1, len(sum_array)):
for j in range(i - 1):
if not (sum_array[i] - sum_array[j]) % k:
return True
return False
|
flexible
|
{
"blob_id": "033973ddc81a5fdf0e40009c4f321215fe3f4217",
"index": 6779,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def checkSubarraySum(self, nums, k):\n if not nums or len(nums) == 1:\n return False\n sum_array = [0] * (len(nums) + 1)\n for i, num in enumerate(nums):\n sum_array[i + 1] = sum_array[i] + num\n if k == 0:\n if sum_array[-1] == 0:\n return True\n else:\n return False\n for i in range(1, len(sum_array)):\n for j in range(i - 1):\n if not (sum_array[i] - sum_array[j]) % k:\n return True\n return False\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class UDP_packet:
def __init__(self, board_info, board_add, state):
self.board_type = int('{0:08b}'.format(board_info)[:4], 2)
self.board_num = int('{0:08b}'.format(board_info)[4:], 2)
self.board_add = board_add
self.state = state
def __str__(self):
return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,
self.board_num, self.board_add, self.state)
def __repr__(self):
return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,
self.board_num, self.board_add, self.state)
<|reserved_special_token_0|>
def init_UDP_connection(DEBUG_MODE=False):
if DEBUG_MODE:
UDP_MASTER_IP = '127.0.0.2'
UDP_MASTER_PORT = 5005
UDP_PC_IP = '127.0.0.1'
UDP_PC_PORT = 5006
else:
UDP_MASTER_IP = '192.168.1.26'
UDP_MASTER_PORT = 5005
UDP_PC_IP = '192.168.1.25'
UDP_PC_PORT = 5005
print('My IP is: {0}, PORT: {1}\nTarget IP is: {0}, PORT: {1}'.
format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(0)
sock.bind((UDP_PC_IP, UDP_PC_PORT))
return sock, UDP_MASTER_IP, UDP_MASTER_PORT
<|reserved_special_token_0|>
def main(sock):
data = b'HELLO'
while True:
ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)
if ready[0]:
data, _ = sock.recvfrom(80)
print('PC: I just received message: [{0}]'.format(data))
sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))
print('PC: I just Sent a [{0}]'.format(data))
else:
sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))
print('PC: I just Sent a [{0}]'.format(data))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UDP_packet:
def __init__(self, board_info, board_add, state):
self.board_type = int('{0:08b}'.format(board_info)[:4], 2)
self.board_num = int('{0:08b}'.format(board_info)[4:], 2)
self.board_add = board_add
self.state = state
def __str__(self):
return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,
self.board_num, self.board_add, self.state)
def __repr__(self):
return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,
self.board_num, self.board_add, self.state)
<|reserved_special_token_0|>
def init_UDP_connection(DEBUG_MODE=False):
if DEBUG_MODE:
UDP_MASTER_IP = '127.0.0.2'
UDP_MASTER_PORT = 5005
UDP_PC_IP = '127.0.0.1'
UDP_PC_PORT = 5006
else:
UDP_MASTER_IP = '192.168.1.26'
UDP_MASTER_PORT = 5005
UDP_PC_IP = '192.168.1.25'
UDP_PC_PORT = 5005
print('My IP is: {0}, PORT: {1}\nTarget IP is: {0}, PORT: {1}'.
format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(0)
sock.bind((UDP_PC_IP, UDP_PC_PORT))
return sock, UDP_MASTER_IP, UDP_MASTER_PORT
<|reserved_special_token_0|>
def main(sock):
data = b'HELLO'
while True:
ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)
if ready[0]:
data, _ = sock.recvfrom(80)
print('PC: I just received message: [{0}]'.format(data))
sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))
print('PC: I just Sent a [{0}]'.format(data))
else:
sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))
print('PC: I just Sent a [{0}]'.format(data))
if __name__ == '__main__':
sock = init_UDP_connection()
main(sock)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
UDP_RECEIVE_TIMEOUT = 1
LOOP_DELAY = 1
<|reserved_special_token_0|>
class UDP_packet:
def __init__(self, board_info, board_add, state):
self.board_type = int('{0:08b}'.format(board_info)[:4], 2)
self.board_num = int('{0:08b}'.format(board_info)[4:], 2)
self.board_add = board_add
self.state = state
def __str__(self):
return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,
self.board_num, self.board_add, self.state)
def __repr__(self):
return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,
self.board_num, self.board_add, self.state)
<|reserved_special_token_0|>
def init_UDP_connection(DEBUG_MODE=False):
if DEBUG_MODE:
UDP_MASTER_IP = '127.0.0.2'
UDP_MASTER_PORT = 5005
UDP_PC_IP = '127.0.0.1'
UDP_PC_PORT = 5006
else:
UDP_MASTER_IP = '192.168.1.26'
UDP_MASTER_PORT = 5005
UDP_PC_IP = '192.168.1.25'
UDP_PC_PORT = 5005
print('My IP is: {0}, PORT: {1}\nTarget IP is: {0}, PORT: {1}'.
format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(0)
sock.bind((UDP_PC_IP, UDP_PC_PORT))
return sock, UDP_MASTER_IP, UDP_MASTER_PORT
<|reserved_special_token_0|>
def main(sock):
data = b'HELLO'
while True:
ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)
if ready[0]:
data, _ = sock.recvfrom(80)
print('PC: I just received message: [{0}]'.format(data))
sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))
print('PC: I just Sent a [{0}]'.format(data))
else:
sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))
print('PC: I just Sent a [{0}]'.format(data))
if __name__ == '__main__':
sock = init_UDP_connection()
main(sock)
<|reserved_special_token_1|>
import socket
import select
import time
<|reserved_special_token_0|>
UDP_RECEIVE_TIMEOUT = 1
LOOP_DELAY = 1
<|reserved_special_token_0|>
class UDP_packet:
def __init__(self, board_info, board_add, state):
self.board_type = int('{0:08b}'.format(board_info)[:4], 2)
self.board_num = int('{0:08b}'.format(board_info)[4:], 2)
self.board_add = board_add
self.state = state
def __str__(self):
return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,
self.board_num, self.board_add, self.state)
def __repr__(self):
return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,
self.board_num, self.board_add, self.state)
<|reserved_special_token_0|>
def init_UDP_connection(DEBUG_MODE=False):
if DEBUG_MODE:
UDP_MASTER_IP = '127.0.0.2'
UDP_MASTER_PORT = 5005
UDP_PC_IP = '127.0.0.1'
UDP_PC_PORT = 5006
else:
UDP_MASTER_IP = '192.168.1.26'
UDP_MASTER_PORT = 5005
UDP_PC_IP = '192.168.1.25'
UDP_PC_PORT = 5005
print('My IP is: {0}, PORT: {1}\nTarget IP is: {0}, PORT: {1}'.
format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(0)
sock.bind((UDP_PC_IP, UDP_PC_PORT))
return sock, UDP_MASTER_IP, UDP_MASTER_PORT
<|reserved_special_token_0|>
def main(sock):
data = b'HELLO'
while True:
ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)
if ready[0]:
data, _ = sock.recvfrom(80)
print('PC: I just received message: [{0}]'.format(data))
sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))
print('PC: I just Sent a [{0}]'.format(data))
else:
sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))
print('PC: I just Sent a [{0}]'.format(data))
if __name__ == '__main__':
sock = init_UDP_connection()
main(sock)
<|reserved_special_token_1|>
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import socket
import select
import time
"""=====================Head Define====================="""
UDP_RECEIVE_TIMEOUT = 1
LOOP_DELAY = 1
"""=====================Class====================="""
class UDP_packet:
def __init__(self,board_info, board_add, state):
self.board_type = int("{0:08b}".format(board_info)[:4], 2)
self.board_num = int("{0:08b}".format(board_info)[4:], 2)
self.board_add = board_add
self.state = state
def __str__(self):
return "Type:{}, Num:{}, Addr:{}, State:{}".format(self.board_type, self.board_num, self.board_add, self.state)
def __repr__(self):
return "Type:{}, Num:{}, Addr:{}, State:{}".format(self.board_type, self.board_num, self.board_add, self.state)
"""=====================Support functions====================="""
def init_UDP_connection(DEBUG_MODE=False):
if DEBUG_MODE:
UDP_MASTER_IP = "127.0.0.2"
UDP_MASTER_PORT = 5005
UDP_PC_IP = "127.0.0.1"
UDP_PC_PORT = 5006
else:
UDP_MASTER_IP = "192.168.1.26"
UDP_MASTER_PORT = 5005
UDP_PC_IP = "192.168.1.25"
UDP_PC_PORT = 5005
print("My IP is: {0}, PORT: {1}\nTarget IP is: {0}, PORT: {1}".format(UDP_PC_IP, UDP_PC_PORT,UDP_MASTER_IP, UDP_MASTER_PORT))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(0)
sock.bind((UDP_PC_IP, UDP_PC_PORT))
return sock, UDP_MASTER_IP, UDP_MASTER_PORT
"""===================== MAIN ====================="""
def main(sock):
data = b"HELLO"
while True:
ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)
if ready[0]:
data, _ = sock.recvfrom(80) # buffer size is 1024 bytes
print("PC: I just received message: [{0}]".format(data))
sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))
print("PC: I just Sent a [{0}]".format(data))
else:
sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))
print("PC: I just Sent a [{0}]".format(data))
if __name__ == '__main__':
sock = init_UDP_connection()
main(sock)
|
flexible
|
{
"blob_id": "7c2a59f698b75d0de89a16310d97a01506c99cb3",
"index": 9840,
"step-1": "<mask token>\n\n\nclass UDP_packet:\n\n def __init__(self, board_info, board_add, state):\n self.board_type = int('{0:08b}'.format(board_info)[:4], 2)\n self.board_num = int('{0:08b}'.format(board_info)[4:], 2)\n self.board_add = board_add\n self.state = state\n\n def __str__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n def __repr__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n\n<mask token>\n\n\ndef init_UDP_connection(DEBUG_MODE=False):\n if DEBUG_MODE:\n UDP_MASTER_IP = '127.0.0.2'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '127.0.0.1'\n UDP_PC_PORT = 5006\n else:\n UDP_MASTER_IP = '192.168.1.26'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '192.168.1.25'\n UDP_PC_PORT = 5005\n print('My IP is: {0}, PORT: {1}\\nTarget IP is: {0}, PORT: {1}'.\n format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setblocking(0)\n sock.bind((UDP_PC_IP, UDP_PC_PORT))\n return sock, UDP_MASTER_IP, UDP_MASTER_PORT\n\n\n<mask token>\n\n\ndef main(sock):\n data = b'HELLO'\n while True:\n ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)\n if ready[0]:\n data, _ = sock.recvfrom(80)\n print('PC: I just received message: [{0}]'.format(data))\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n else:\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UDP_packet:\n\n def __init__(self, board_info, board_add, state):\n self.board_type = int('{0:08b}'.format(board_info)[:4], 2)\n self.board_num = int('{0:08b}'.format(board_info)[4:], 2)\n self.board_add = board_add\n self.state = state\n\n def __str__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n def __repr__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n\n<mask token>\n\n\ndef init_UDP_connection(DEBUG_MODE=False):\n if DEBUG_MODE:\n UDP_MASTER_IP = '127.0.0.2'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '127.0.0.1'\n UDP_PC_PORT = 5006\n else:\n UDP_MASTER_IP = '192.168.1.26'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '192.168.1.25'\n UDP_PC_PORT = 5005\n print('My IP is: {0}, PORT: {1}\\nTarget IP is: {0}, PORT: {1}'.\n format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setblocking(0)\n sock.bind((UDP_PC_IP, UDP_PC_PORT))\n return sock, UDP_MASTER_IP, UDP_MASTER_PORT\n\n\n<mask token>\n\n\ndef main(sock):\n data = b'HELLO'\n while True:\n ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)\n if ready[0]:\n data, _ = sock.recvfrom(80)\n print('PC: I just received message: [{0}]'.format(data))\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n else:\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n\n\nif __name__ == '__main__':\n sock = init_UDP_connection()\n main(sock)\n",
"step-3": "<mask token>\nUDP_RECEIVE_TIMEOUT = 1\nLOOP_DELAY = 1\n<mask token>\n\n\nclass UDP_packet:\n\n def __init__(self, board_info, board_add, state):\n self.board_type = int('{0:08b}'.format(board_info)[:4], 2)\n self.board_num = int('{0:08b}'.format(board_info)[4:], 2)\n self.board_add = board_add\n self.state = state\n\n def __str__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n def __repr__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n\n<mask token>\n\n\ndef init_UDP_connection(DEBUG_MODE=False):\n if DEBUG_MODE:\n UDP_MASTER_IP = '127.0.0.2'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '127.0.0.1'\n UDP_PC_PORT = 5006\n else:\n UDP_MASTER_IP = '192.168.1.26'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '192.168.1.25'\n UDP_PC_PORT = 5005\n print('My IP is: {0}, PORT: {1}\\nTarget IP is: {0}, PORT: {1}'.\n format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setblocking(0)\n sock.bind((UDP_PC_IP, UDP_PC_PORT))\n return sock, UDP_MASTER_IP, UDP_MASTER_PORT\n\n\n<mask token>\n\n\ndef main(sock):\n data = b'HELLO'\n while True:\n ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)\n if ready[0]:\n data, _ = sock.recvfrom(80)\n print('PC: I just received message: [{0}]'.format(data))\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n else:\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n\n\nif __name__ == '__main__':\n sock = init_UDP_connection()\n main(sock)\n",
"step-4": "import socket\nimport select\nimport time\n<mask token>\nUDP_RECEIVE_TIMEOUT = 1\nLOOP_DELAY = 1\n<mask token>\n\n\nclass UDP_packet:\n\n def __init__(self, board_info, board_add, state):\n self.board_type = int('{0:08b}'.format(board_info)[:4], 2)\n self.board_num = int('{0:08b}'.format(board_info)[4:], 2)\n self.board_add = board_add\n self.state = state\n\n def __str__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n def __repr__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n\n<mask token>\n\n\ndef init_UDP_connection(DEBUG_MODE=False):\n if DEBUG_MODE:\n UDP_MASTER_IP = '127.0.0.2'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '127.0.0.1'\n UDP_PC_PORT = 5006\n else:\n UDP_MASTER_IP = '192.168.1.26'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '192.168.1.25'\n UDP_PC_PORT = 5005\n print('My IP is: {0}, PORT: {1}\\nTarget IP is: {0}, PORT: {1}'.\n format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setblocking(0)\n sock.bind((UDP_PC_IP, UDP_PC_PORT))\n return sock, UDP_MASTER_IP, UDP_MASTER_PORT\n\n\n<mask token>\n\n\ndef main(sock):\n data = b'HELLO'\n while True:\n ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)\n if ready[0]:\n data, _ = sock.recvfrom(80)\n print('PC: I just received message: [{0}]'.format(data))\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n else:\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n\n\nif __name__ == '__main__':\n sock = init_UDP_connection()\n main(sock)\n",
"step-5": "#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nimport socket\nimport select\nimport time\n\n\"\"\"=====================Head Define=====================\"\"\"\nUDP_RECEIVE_TIMEOUT = 1\nLOOP_DELAY = 1\n\n\"\"\"=====================Class=====================\"\"\"\n\n\nclass UDP_packet:\n def __init__(self,board_info, board_add, state):\n self.board_type = int(\"{0:08b}\".format(board_info)[:4], 2)\n self.board_num = int(\"{0:08b}\".format(board_info)[4:], 2)\n self.board_add = board_add\n self.state = state\n\n def __str__(self):\n return \"Type:{}, Num:{}, Addr:{}, State:{}\".format(self.board_type, self.board_num, self.board_add, self.state)\n\n def __repr__(self):\n return \"Type:{}, Num:{}, Addr:{}, State:{}\".format(self.board_type, self.board_num, self.board_add, self.state)\n\n\n\"\"\"=====================Support functions=====================\"\"\"\n\n\ndef init_UDP_connection(DEBUG_MODE=False):\n if DEBUG_MODE:\n UDP_MASTER_IP = \"127.0.0.2\"\n UDP_MASTER_PORT = 5005\n \n UDP_PC_IP = \"127.0.0.1\"\n UDP_PC_PORT = 5006\n else:\n UDP_MASTER_IP = \"192.168.1.26\"\n UDP_MASTER_PORT = 5005\n \n UDP_PC_IP = \"192.168.1.25\"\n UDP_PC_PORT = 5005 \n print(\"My IP is: {0}, PORT: {1}\\nTarget IP is: {0}, PORT: {1}\".format(UDP_PC_IP, UDP_PC_PORT,UDP_MASTER_IP, UDP_MASTER_PORT))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setblocking(0)\n sock.bind((UDP_PC_IP, UDP_PC_PORT))\n\n return sock, UDP_MASTER_IP, UDP_MASTER_PORT\n\n\n\"\"\"===================== MAIN =====================\"\"\"\n\n\ndef main(sock):\n\n data = b\"HELLO\"\n while True:\n ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)\n if ready[0]:\n data, _ = sock.recvfrom(80) # buffer size is 1024 bytes\n print(\"PC: I just received message: [{0}]\".format(data))\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print(\"PC: I just Sent a [{0}]\".format(data))\n else:\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print(\"PC: I just Sent a 
[{0}]\".format(data))\n\n\nif __name__ == '__main__':\n sock = init_UDP_connection()\n main(sock)",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
class NP_Net_MirrorSym:
<|reserved_special_token_0|>
def load_from_file(self, fname):
params = joblib.load(fname)
pol_scope = list(params.keys())[0][0:list(params.keys())[0].find('/')]
obrms_runningsumsq = params[pol_scope + '/obfilter/runningsumsq:0']
obrms_count = params[pol_scope + '/obfilter/count:0']
obrms_runningsum = params[pol_scope + '/obfilter/runningsum:0']
self.obrms_mean = obrms_runningsum / obrms_count
self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count -
self.obrms_mean ** 2, 0.01, 1000000))
for i in range(10):
if pol_scope + '/pol_net/genff' + str(i) + '/w:0' in params:
W = params[pol_scope + '/pol_net/genff' + str(i) + '/w:0']
b = params[pol_scope + '/pol_net/genff' + str(i) + '/b:0']
self.nn_params.append([W, b])
W_final = params[pol_scope + '/pol_net/genff_out/w:0']
b_final = params[pol_scope + '/pol_net/genff_out/b:0']
self.nn_params.append([W_final, b_final])
def get_output(self, input, activation=np.tanh):
assert self.obrms_mean is not None
last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0,
5.0)
for i in range(len(self.nn_params) - 1):
last_out = activation(np.dot(self.nn_params[i][0].T, last_out) +
self.nn_params[i][1])
out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]
mirrorlast_out = np.clip((np.dot(input, self.obs_perm_mat) - self.
obrms_mean) / self.obrms_std, -5.0, 5.0)
for i in range(len(self.nn_params) - 1):
mirrorlast_out = activation(np.dot(self.nn_params[i][0].T,
mirrorlast_out) + self.nn_params[i][1])
mirrorout = np.dot(np.dot(self.nn_params[-1][0].T, mirrorlast_out) +
self.nn_params[-1][1], self.act_perm_mat)
if self.nvec is None:
return out + mirrorout
else:
splitted_out = np.split(out + mirrorout, np.cumsum(self.nvec)[0:-1]
)
discrete_out = np.array([np.argmax(prob) for prob in splitted_out])
return discrete_out
class NP_Policy:
def __init__(self, interp_sch, param_file, discrete_action, action_bins,
delta_angle_scale, action_filter_size, obs_perm=None, act_perm=None):
self.interp_sch = interp_sch
self.obs_cache = []
self.action_cache = []
self.action_filter_size = action_filter_size
if interp_sch is not None:
self.net = NP_Net()
else:
self.net = NP_Net_MirrorSym(action_bins, obs_perm, act_perm)
self.net.load_from_file(param_file)
self.discrete_action = discrete_action
self.delta_angle_scale = delta_angle_scale
if discrete_action:
self.net.nvec = action_bins
def get_initial_state(self):
if self.interp_sch is not None:
return self.interp_sch[0][1]
else:
return 0.5 * (pose_squat + pose_stand)
def reset(self):
self.action_cache = []
def act(self, o, t):
new_action = self.net.get_output(o)
if self.discrete_action:
new_action = new_action * 1.0 / np.floor(self.net.nvec / 2.0) - 1.0
self.action_cache.append(new_action)
if len(self.action_cache) > self.action_filter_size:
self.action_cache.pop(0)
filtered_action = np.mean(self.action_cache, axis=0)
clamped_control = np.clip(filtered_action, -1, 1)
if self.interp_sch is not None:
self.ref_target = self.interp_sch[0][1]
for i in range(len(self.interp_sch) - 1):
if t >= self.interp_sch[i][0] and t < self.interp_sch[i + 1][0
]:
ratio = (t - self.interp_sch[i][0]) / (self.interp_sch[
i + 1][0] - self.interp_sch[i][0])
self.ref_target = ratio * self.interp_sch[i + 1][1] + (
1 - ratio) * self.interp_sch[i][1]
if t > self.interp_sch[-1][0]:
self.ref_target = self.interp_sch[-1][1]
target_pose = (self.ref_target + clamped_control * self.
delta_angle_scale)
else:
target_pose = (clamped_control + 1.0) / 2.0 * (
SIM_CONTROL_UP_BOUND_RAD - SIM_CONTROL_LOW_BOUND_RAD
) + SIM_CONTROL_LOW_BOUND_RAD
target_pose = np.clip(target_pose, SIM_JOINT_LOW_BOUND_RAD,
SIM_JOINT_UP_BOUND_RAD)
return target_pose
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NP_Net:
def __init__(self, nvec=None):
self.obrms_mean = None
self.obrms_std = None
self.nn_params = []
self.nvec = nvec
<|reserved_special_token_0|>
def get_output(self, input, activation=np.tanh):
assert self.obrms_mean is not None
last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0,
5.0)
for i in range(len(self.nn_params) - 1):
last_out = activation(np.dot(self.nn_params[i][0].T, last_out) +
self.nn_params[i][1])
out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]
if self.nvec is None:
return out
else:
splitted_out = np.split(out, np.cumsum(self.nvec)[0:-1])
discrete_out = np.array([np.argmax(prob) for prob in splitted_out])
return discrete_out
class NP_Net_MirrorSym:
def __init__(self, nvec=None, observation_permutation=None,
action_permutation=None):
self.obrms_mean = None
self.obrms_std = None
self.nn_params = []
self.nvec = nvec
obs_perm_mat = np.zeros((len(observation_permutation), len(
observation_permutation)), dtype=np.float32)
self.obs_perm_mat = obs_perm_mat
for i, perm in enumerate(observation_permutation):
obs_perm_mat[i][int(np.abs(perm))] = np.sign(perm)
if nvec is None:
act_perm_mat = np.zeros((len(action_permutation), len(
action_permutation)), dtype=np.float32)
self.act_perm_mat = act_perm_mat
for i, perm in enumerate(action_permutation):
self.act_perm_mat[i][int(np.abs(perm))] = np.sign(perm)
else:
total_dim = int(np.sum(nvec))
dim_index = np.concatenate([[0], np.cumsum(nvec)])
act_perm_mat = np.zeros((total_dim, total_dim), dtype=np.float32)
self.act_perm_mat = act_perm_mat
for i, perm in enumerate(action_permutation):
perm_mat = np.identity(nvec[i])
if np.sign(perm) < 0:
perm_mat = np.flipud(perm_mat)
self.act_perm_mat[dim_index[i]:dim_index[i] + nvec[i],
dim_index[int(np.abs(perm))]:dim_index[int(np.abs(perm)
)] + nvec[int(np.abs(perm))]] = perm_mat
def load_from_file(self, fname):
params = joblib.load(fname)
pol_scope = list(params.keys())[0][0:list(params.keys())[0].find('/')]
obrms_runningsumsq = params[pol_scope + '/obfilter/runningsumsq:0']
obrms_count = params[pol_scope + '/obfilter/count:0']
obrms_runningsum = params[pol_scope + '/obfilter/runningsum:0']
self.obrms_mean = obrms_runningsum / obrms_count
self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count -
self.obrms_mean ** 2, 0.01, 1000000))
for i in range(10):
if pol_scope + '/pol_net/genff' + str(i) + '/w:0' in params:
W = params[pol_scope + '/pol_net/genff' + str(i) + '/w:0']
b = params[pol_scope + '/pol_net/genff' + str(i) + '/b:0']
self.nn_params.append([W, b])
W_final = params[pol_scope + '/pol_net/genff_out/w:0']
b_final = params[pol_scope + '/pol_net/genff_out/b:0']
self.nn_params.append([W_final, b_final])
def get_output(self, input, activation=np.tanh):
assert self.obrms_mean is not None
last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0,
5.0)
for i in range(len(self.nn_params) - 1):
last_out = activation(np.dot(self.nn_params[i][0].T, last_out) +
self.nn_params[i][1])
out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]
mirrorlast_out = np.clip((np.dot(input, self.obs_perm_mat) - self.
obrms_mean) / self.obrms_std, -5.0, 5.0)
for i in range(len(self.nn_params) - 1):
mirrorlast_out = activation(np.dot(self.nn_params[i][0].T,
mirrorlast_out) + self.nn_params[i][1])
mirrorout = np.dot(np.dot(self.nn_params[-1][0].T, mirrorlast_out) +
self.nn_params[-1][1], self.act_perm_mat)
if self.nvec is None:
return out + mirrorout
else:
splitted_out = np.split(out + mirrorout, np.cumsum(self.nvec)[0:-1]
)
discrete_out = np.array([np.argmax(prob) for prob in splitted_out])
return discrete_out
class NP_Policy:
    """Numpy policy wrapper: network output plus keyframe feed-forward motion.

    Combines a neural-network correction with a time-based keyframe
    interpolation schedule (``interp_sch``) and low-pass filters the
    network actions with a moving average of size ``action_filter_size``.
    """

    def __init__(self, interp_sch, param_file, discrete_action, action_bins,
                 delta_angle_scale, action_filter_size=5, obs_perm=None,
                 act_perm=None):
        """Load the network parameters and configure action processing.

        Args:
            interp_sch: list of [time, pose] keyframes, or None to run the
                mirror-symmetric network without a feed-forward schedule.
            param_file: joblib dump of trained network parameters.
            discrete_action: True if the network outputs per-joint bins.
            action_bins: bin counts per joint (used when discrete_action).
            delta_angle_scale: radians of authority around the reference.
            action_filter_size: moving-average window over recent actions.
                BUGFIX: given a default (5) because the example driver in
                this file constructs NP_Policy without it, which raised
                TypeError -- TODO confirm the value used during training.
            obs_perm, act_perm: permutations for the mirror-symmetric net.
        """
        self.interp_sch = interp_sch
        self.obs_cache = []
        self.action_cache = []  # acts as the policy's short-term memory
        self.action_filter_size = action_filter_size
        if interp_sch is not None:
            self.net = NP_Net()
        else:
            self.net = NP_Net_MirrorSym(action_bins, obs_perm, act_perm)
        self.net.load_from_file(param_file)
        self.discrete_action = discrete_action
        self.delta_angle_scale = delta_angle_scale
        if discrete_action:
            self.net.nvec = action_bins

    def get_initial_state(self):
        """Return the 20d starting pose for the robot.

        NOTE(review): the no-schedule branch reads module-level
        pose_squat/pose_stand, which in this file are only defined when it
        runs as a script -- confirm they exist in the deployed module.
        """
        if self.interp_sch is not None:
            return self.interp_sch[0][1]
        else:
            return 0.5 * (pose_squat + pose_stand)

    def reset(self):
        """Clear the action cache (the policy's only internal state)."""
        self.action_cache = []

    def _reference_pose(self, t):
        """Interpolate the keyframe schedule at time t (seconds)."""
        ref = self.interp_sch[0][1]
        for i in range(len(self.interp_sch) - 1):
            t0, pose0 = self.interp_sch[i]
            t1, pose1 = self.interp_sch[i + 1]
            if t0 <= t < t1:
                ratio = (t - t0) / (t1 - t0)
                ref = ratio * pose1 + (1 - ratio) * pose0
        # BUGFIX: use >= so that t exactly at the final keyframe time holds
        # the final pose instead of falling back to the first keyframe.
        if t >= self.interp_sch[-1][0]:
            ref = self.interp_sch[-1][1]
        return ref

    def act(self, o, t):
        """Compute target joint angles (radians) from observation o at time t.

        Args:
            o: 40d vector of joint positions and velocities.
            t: current time in seconds (indexes the keyframe schedule).
        Returns:
            20d target pose clipped to the simulated joint limits.
        """
        new_action = self.net.get_output(o)
        if self.discrete_action:
            # Map bin index {0..nvec-1} to a symmetric value in [-1, 1].
            new_action = new_action * 1.0 / np.floor(self.net.nvec / 2.0) - 1.0
        # Moving-average filter over the most recent actions.
        self.action_cache.append(new_action)
        if len(self.action_cache) > self.action_filter_size:
            self.action_cache.pop(0)
        filtered_action = np.mean(self.action_cache, axis=0)
        clamped_control = np.clip(filtered_action, -1, 1)
        if self.interp_sch is not None:
            # Keyframe reference plus a bounded network correction.
            self.ref_target = self._reference_pose(t)
            target_pose = (self.ref_target
                           + clamped_control * self.delta_angle_scale)
        else:
            # No schedule: map [-1, 1] directly onto the control range.
            target_pose = ((clamped_control + 1.0) / 2.0
                           * (SIM_CONTROL_UP_BOUND_RAD
                              - SIM_CONTROL_LOW_BOUND_RAD)
                           + SIM_CONTROL_LOW_BOUND_RAD)
        target_pose = np.clip(target_pose, SIM_JOINT_LOW_BOUND_RAD,
                              SIM_JOINT_UP_BOUND_RAD)
        return target_pose
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NP_Net:
    """Feed-forward policy network evaluated with plain numpy.

    Holds observation-normalisation statistics and a stack of affine
    layers loaded from a pickled TensorFlow parameter dump.
    """

    def __init__(self, nvec=None):
        # Running mean / std of observations (set by load_from_file).
        self.obrms_mean = None
        self.obrms_std = None
        # Layer parameters as [[W0, b0], [W1, b1], ..., [Wn, bn]].
        self.nn_params = []
        # None -> continuous output; otherwise bin counts per discrete action.
        self.nvec = nvec

    def load_from_file(self, fname):
        """Load normalisation stats and layer weights from a joblib dump."""
        params = joblib.load(fname)
        first_key = list(params.keys())[0]
        pol_scope = first_key[:first_key.find('/')]
        running_sum_sq = params[pol_scope + '/obfilter/runningsumsq:0']
        count = params[pol_scope + '/obfilter/count:0']
        running_sum = params[pol_scope + '/obfilter/runningsum:0']
        self.obrms_mean = running_sum / count
        variance = np.clip(running_sum_sq / count - self.obrms_mean ** 2,
                           1e-2, 1000000)
        self.obrms_std = np.sqrt(variance)
        for layer in range(10):  # assume at most 10 hidden layers
            w_key = pol_scope + '/pol/fc' + str(layer) + '/kernel:0'
            if w_key in params:
                b_key = pol_scope + '/pol/fc' + str(layer) + '/bias:0'
                self.nn_params.append([params[w_key], params[b_key]])
        self.nn_params.append([params[pol_scope + '/pol/final/kernel:0'],
                               params[pol_scope + '/pol/final/bias:0']])

    def get_output(self, input, activation=np.tanh):
        """Run one forward pass; returns raw action or per-bin argmax."""
        assert self.obrms_mean is not None
        hidden = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0, 5.0)
        for W, b in self.nn_params[:-1]:
            hidden = activation(np.dot(W.T, hidden) + b)
        W_out, b_out = self.nn_params[-1]
        out = np.dot(W_out.T, hidden) + b_out
        if self.nvec is None:
            return out
        # Discrete head: split logits per action dim and take the argmax bin.
        per_dim = np.split(out, np.cumsum(self.nvec)[0:-1])
        return np.array([np.argmax(logits) for logits in per_dim])
class NP_Net_MirrorSym:
def __init__(self, nvec=None, observation_permutation=None,
action_permutation=None):
self.obrms_mean = None
self.obrms_std = None
self.nn_params = []
self.nvec = nvec
obs_perm_mat = np.zeros((len(observation_permutation), len(
observation_permutation)), dtype=np.float32)
self.obs_perm_mat = obs_perm_mat
for i, perm in enumerate(observation_permutation):
obs_perm_mat[i][int(np.abs(perm))] = np.sign(perm)
if nvec is None:
act_perm_mat = np.zeros((len(action_permutation), len(
action_permutation)), dtype=np.float32)
self.act_perm_mat = act_perm_mat
for i, perm in enumerate(action_permutation):
self.act_perm_mat[i][int(np.abs(perm))] = np.sign(perm)
else:
total_dim = int(np.sum(nvec))
dim_index = np.concatenate([[0], np.cumsum(nvec)])
act_perm_mat = np.zeros((total_dim, total_dim), dtype=np.float32)
self.act_perm_mat = act_perm_mat
for i, perm in enumerate(action_permutation):
perm_mat = np.identity(nvec[i])
if np.sign(perm) < 0:
perm_mat = np.flipud(perm_mat)
self.act_perm_mat[dim_index[i]:dim_index[i] + nvec[i],
dim_index[int(np.abs(perm))]:dim_index[int(np.abs(perm)
)] + nvec[int(np.abs(perm))]] = perm_mat
def load_from_file(self, fname):
params = joblib.load(fname)
pol_scope = list(params.keys())[0][0:list(params.keys())[0].find('/')]
obrms_runningsumsq = params[pol_scope + '/obfilter/runningsumsq:0']
obrms_count = params[pol_scope + '/obfilter/count:0']
obrms_runningsum = params[pol_scope + '/obfilter/runningsum:0']
self.obrms_mean = obrms_runningsum / obrms_count
self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count -
self.obrms_mean ** 2, 0.01, 1000000))
for i in range(10):
if pol_scope + '/pol_net/genff' + str(i) + '/w:0' in params:
W = params[pol_scope + '/pol_net/genff' + str(i) + '/w:0']
b = params[pol_scope + '/pol_net/genff' + str(i) + '/b:0']
self.nn_params.append([W, b])
W_final = params[pol_scope + '/pol_net/genff_out/w:0']
b_final = params[pol_scope + '/pol_net/genff_out/b:0']
self.nn_params.append([W_final, b_final])
def get_output(self, input, activation=np.tanh):
assert self.obrms_mean is not None
last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0,
5.0)
for i in range(len(self.nn_params) - 1):
last_out = activation(np.dot(self.nn_params[i][0].T, last_out) +
self.nn_params[i][1])
out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]
mirrorlast_out = np.clip((np.dot(input, self.obs_perm_mat) - self.
obrms_mean) / self.obrms_std, -5.0, 5.0)
for i in range(len(self.nn_params) - 1):
mirrorlast_out = activation(np.dot(self.nn_params[i][0].T,
mirrorlast_out) + self.nn_params[i][1])
mirrorout = np.dot(np.dot(self.nn_params[-1][0].T, mirrorlast_out) +
self.nn_params[-1][1], self.act_perm_mat)
if self.nvec is None:
return out + mirrorout
else:
splitted_out = np.split(out + mirrorout, np.cumsum(self.nvec)[0:-1]
)
discrete_out = np.array([np.argmax(prob) for prob in splitted_out])
return discrete_out
class NP_Policy:
    """Numpy policy wrapper: network output plus keyframe feed-forward motion.

    Combines a neural-network correction with a time-based keyframe
    interpolation schedule (``interp_sch``) and low-pass filters the
    network actions with a moving average of size ``action_filter_size``.
    """

    def __init__(self, interp_sch, param_file, discrete_action, action_bins,
                 delta_angle_scale, action_filter_size=5, obs_perm=None,
                 act_perm=None):
        """Load the network parameters and configure action processing.

        Args:
            interp_sch: list of [time, pose] keyframes, or None to run the
                mirror-symmetric network without a feed-forward schedule.
            param_file: joblib dump of trained network parameters.
            discrete_action: True if the network outputs per-joint bins.
            action_bins: bin counts per joint (used when discrete_action).
            delta_angle_scale: radians of authority around the reference.
            action_filter_size: moving-average window over recent actions.
                BUGFIX: given a default (5) because the example driver in
                this file constructs NP_Policy without it, which raised
                TypeError -- TODO confirm the value used during training.
            obs_perm, act_perm: permutations for the mirror-symmetric net.
        """
        self.interp_sch = interp_sch
        self.obs_cache = []
        self.action_cache = []  # acts as the policy's short-term memory
        self.action_filter_size = action_filter_size
        if interp_sch is not None:
            self.net = NP_Net()
        else:
            self.net = NP_Net_MirrorSym(action_bins, obs_perm, act_perm)
        self.net.load_from_file(param_file)
        self.discrete_action = discrete_action
        self.delta_angle_scale = delta_angle_scale
        if discrete_action:
            self.net.nvec = action_bins

    def get_initial_state(self):
        """Return the 20d starting pose for the robot.

        NOTE(review): the no-schedule branch reads module-level
        pose_squat/pose_stand, which in this file are only defined when it
        runs as a script -- confirm they exist in the deployed module.
        """
        if self.interp_sch is not None:
            return self.interp_sch[0][1]
        else:
            return 0.5 * (pose_squat + pose_stand)

    def reset(self):
        """Clear the action cache (the policy's only internal state)."""
        self.action_cache = []

    def _reference_pose(self, t):
        """Interpolate the keyframe schedule at time t (seconds)."""
        ref = self.interp_sch[0][1]
        for i in range(len(self.interp_sch) - 1):
            t0, pose0 = self.interp_sch[i]
            t1, pose1 = self.interp_sch[i + 1]
            if t0 <= t < t1:
                ratio = (t - t0) / (t1 - t0)
                ref = ratio * pose1 + (1 - ratio) * pose0
        # BUGFIX: use >= so that t exactly at the final keyframe time holds
        # the final pose instead of falling back to the first keyframe.
        if t >= self.interp_sch[-1][0]:
            ref = self.interp_sch[-1][1]
        return ref

    def act(self, o, t):
        """Compute target joint angles (radians) from observation o at time t.

        Args:
            o: 40d vector of joint positions and velocities.
            t: current time in seconds (indexes the keyframe schedule).
        Returns:
            20d target pose clipped to the simulated joint limits.
        """
        new_action = self.net.get_output(o)
        if self.discrete_action:
            # Map bin index {0..nvec-1} to a symmetric value in [-1, 1].
            new_action = new_action * 1.0 / np.floor(self.net.nvec / 2.0) - 1.0
        # Moving-average filter over the most recent actions.
        self.action_cache.append(new_action)
        if len(self.action_cache) > self.action_filter_size:
            self.action_cache.pop(0)
        filtered_action = np.mean(self.action_cache, axis=0)
        clamped_control = np.clip(filtered_action, -1, 1)
        if self.interp_sch is not None:
            # Keyframe reference plus a bounded network correction.
            self.ref_target = self._reference_pose(t)
            target_pose = (self.ref_target
                           + clamped_control * self.delta_angle_scale)
        else:
            # No schedule: map [-1, 1] directly onto the control range.
            target_pose = ((clamped_control + 1.0) / 2.0
                           * (SIM_CONTROL_UP_BOUND_RAD
                              - SIM_CONTROL_LOW_BOUND_RAD)
                           + SIM_CONTROL_LOW_BOUND_RAD)
        target_pose = np.clip(target_pose, SIM_JOINT_LOW_BOUND_RAD,
                              SIM_JOINT_UP_BOUND_RAD)
        return target_pose
def toRobot(positions):
    """Convert 20 joint angles (radians, simulation order) to Dynamixel ticks.

    Reorders from the simulation dof order to the hardware servo order and
    maps radians to integer servo values (0.088 deg per tick, 2048 = zero).
    Truncation toward zero matches the original per-element int() conversion.
    """
    # Simulation-dof -> hardware-servo reordering.
    index = [3, 0, 4, 1, 5, 2, 14, 8, 15, 9, 16, 10, 17, 11, 18, 12, 19, 13,
             6, 7]
    # Vectorized radians -> ticks conversion (replaces the per-joint loop).
    ticks = np.trunc(np.asarray(positions) * 180 * (1 / (np.pi * 0.088))
                     ).astype(int) + 2048
    return ticks[index]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import joblib
import numpy as np
from darwin.darwin_utils import *
class NP_Net:
    """Feed-forward policy network evaluated with plain numpy.

    Holds observation-normalisation statistics and a stack of affine
    layers loaded from a pickled TensorFlow parameter dump.
    """

    def __init__(self, nvec=None):
        # Running mean / std of observations (set by load_from_file).
        self.obrms_mean = None
        self.obrms_std = None
        # Layer parameters as [[W0, b0], [W1, b1], ..., [Wn, bn]].
        self.nn_params = []
        # None -> continuous output; otherwise bin counts per discrete action.
        self.nvec = nvec

    def load_from_file(self, fname):
        """Load normalisation stats and layer weights from a joblib dump."""
        params = joblib.load(fname)
        first_key = list(params.keys())[0]
        pol_scope = first_key[:first_key.find('/')]
        running_sum_sq = params[pol_scope + '/obfilter/runningsumsq:0']
        count = params[pol_scope + '/obfilter/count:0']
        running_sum = params[pol_scope + '/obfilter/runningsum:0']
        self.obrms_mean = running_sum / count
        variance = np.clip(running_sum_sq / count - self.obrms_mean ** 2,
                           1e-2, 1000000)
        self.obrms_std = np.sqrt(variance)
        for layer in range(10):  # assume at most 10 hidden layers
            w_key = pol_scope + '/pol/fc' + str(layer) + '/kernel:0'
            if w_key in params:
                b_key = pol_scope + '/pol/fc' + str(layer) + '/bias:0'
                self.nn_params.append([params[w_key], params[b_key]])
        self.nn_params.append([params[pol_scope + '/pol/final/kernel:0'],
                               params[pol_scope + '/pol/final/bias:0']])

    def get_output(self, input, activation=np.tanh):
        """Run one forward pass; returns raw action or per-bin argmax."""
        assert self.obrms_mean is not None
        hidden = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0, 5.0)
        for W, b in self.nn_params[:-1]:
            hidden = activation(np.dot(W.T, hidden) + b)
        W_out, b_out = self.nn_params[-1]
        out = np.dot(W_out.T, hidden) + b_out
        if self.nvec is None:
            return out
        # Discrete head: split logits per action dim and take the argmax bin.
        per_dim = np.split(out, np.cumsum(self.nvec)[0:-1])
        return np.array([np.argmax(logits) for logits in per_dim])
class NP_Net_MirrorSym:
def __init__(self, nvec=None, observation_permutation=None,
action_permutation=None):
self.obrms_mean = None
self.obrms_std = None
self.nn_params = []
self.nvec = nvec
obs_perm_mat = np.zeros((len(observation_permutation), len(
observation_permutation)), dtype=np.float32)
self.obs_perm_mat = obs_perm_mat
for i, perm in enumerate(observation_permutation):
obs_perm_mat[i][int(np.abs(perm))] = np.sign(perm)
if nvec is None:
act_perm_mat = np.zeros((len(action_permutation), len(
action_permutation)), dtype=np.float32)
self.act_perm_mat = act_perm_mat
for i, perm in enumerate(action_permutation):
self.act_perm_mat[i][int(np.abs(perm))] = np.sign(perm)
else:
total_dim = int(np.sum(nvec))
dim_index = np.concatenate([[0], np.cumsum(nvec)])
act_perm_mat = np.zeros((total_dim, total_dim), dtype=np.float32)
self.act_perm_mat = act_perm_mat
for i, perm in enumerate(action_permutation):
perm_mat = np.identity(nvec[i])
if np.sign(perm) < 0:
perm_mat = np.flipud(perm_mat)
self.act_perm_mat[dim_index[i]:dim_index[i] + nvec[i],
dim_index[int(np.abs(perm))]:dim_index[int(np.abs(perm)
)] + nvec[int(np.abs(perm))]] = perm_mat
def load_from_file(self, fname):
params = joblib.load(fname)
pol_scope = list(params.keys())[0][0:list(params.keys())[0].find('/')]
obrms_runningsumsq = params[pol_scope + '/obfilter/runningsumsq:0']
obrms_count = params[pol_scope + '/obfilter/count:0']
obrms_runningsum = params[pol_scope + '/obfilter/runningsum:0']
self.obrms_mean = obrms_runningsum / obrms_count
self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count -
self.obrms_mean ** 2, 0.01, 1000000))
for i in range(10):
if pol_scope + '/pol_net/genff' + str(i) + '/w:0' in params:
W = params[pol_scope + '/pol_net/genff' + str(i) + '/w:0']
b = params[pol_scope + '/pol_net/genff' + str(i) + '/b:0']
self.nn_params.append([W, b])
W_final = params[pol_scope + '/pol_net/genff_out/w:0']
b_final = params[pol_scope + '/pol_net/genff_out/b:0']
self.nn_params.append([W_final, b_final])
def get_output(self, input, activation=np.tanh):
assert self.obrms_mean is not None
last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0,
5.0)
for i in range(len(self.nn_params) - 1):
last_out = activation(np.dot(self.nn_params[i][0].T, last_out) +
self.nn_params[i][1])
out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]
mirrorlast_out = np.clip((np.dot(input, self.obs_perm_mat) - self.
obrms_mean) / self.obrms_std, -5.0, 5.0)
for i in range(len(self.nn_params) - 1):
mirrorlast_out = activation(np.dot(self.nn_params[i][0].T,
mirrorlast_out) + self.nn_params[i][1])
mirrorout = np.dot(np.dot(self.nn_params[-1][0].T, mirrorlast_out) +
self.nn_params[-1][1], self.act_perm_mat)
if self.nvec is None:
return out + mirrorout
else:
splitted_out = np.split(out + mirrorout, np.cumsum(self.nvec)[0:-1]
)
discrete_out = np.array([np.argmax(prob) for prob in splitted_out])
return discrete_out
class NP_Policy:
    """Numpy policy wrapper: network output plus keyframe feed-forward motion.

    Combines a neural-network correction with a time-based keyframe
    interpolation schedule (``interp_sch``) and low-pass filters the
    network actions with a moving average of size ``action_filter_size``.
    """

    def __init__(self, interp_sch, param_file, discrete_action, action_bins,
                 delta_angle_scale, action_filter_size=5, obs_perm=None,
                 act_perm=None):
        """Load the network parameters and configure action processing.

        Args:
            interp_sch: list of [time, pose] keyframes, or None to run the
                mirror-symmetric network without a feed-forward schedule.
            param_file: joblib dump of trained network parameters.
            discrete_action: True if the network outputs per-joint bins.
            action_bins: bin counts per joint (used when discrete_action).
            delta_angle_scale: radians of authority around the reference.
            action_filter_size: moving-average window over recent actions.
                BUGFIX: given a default (5) because the example driver in
                this file constructs NP_Policy without it, which raised
                TypeError -- TODO confirm the value used during training.
            obs_perm, act_perm: permutations for the mirror-symmetric net.
        """
        self.interp_sch = interp_sch
        self.obs_cache = []
        self.action_cache = []  # acts as the policy's short-term memory
        self.action_filter_size = action_filter_size
        if interp_sch is not None:
            self.net = NP_Net()
        else:
            self.net = NP_Net_MirrorSym(action_bins, obs_perm, act_perm)
        self.net.load_from_file(param_file)
        self.discrete_action = discrete_action
        self.delta_angle_scale = delta_angle_scale
        if discrete_action:
            self.net.nvec = action_bins

    def get_initial_state(self):
        """Return the 20d starting pose for the robot.

        NOTE(review): the no-schedule branch reads module-level
        pose_squat/pose_stand, which in this file are only defined when it
        runs as a script -- confirm they exist in the deployed module.
        """
        if self.interp_sch is not None:
            return self.interp_sch[0][1]
        else:
            return 0.5 * (pose_squat + pose_stand)

    def reset(self):
        """Clear the action cache (the policy's only internal state)."""
        self.action_cache = []

    def _reference_pose(self, t):
        """Interpolate the keyframe schedule at time t (seconds)."""
        ref = self.interp_sch[0][1]
        for i in range(len(self.interp_sch) - 1):
            t0, pose0 = self.interp_sch[i]
            t1, pose1 = self.interp_sch[i + 1]
            if t0 <= t < t1:
                ratio = (t - t0) / (t1 - t0)
                ref = ratio * pose1 + (1 - ratio) * pose0
        # BUGFIX: use >= so that t exactly at the final keyframe time holds
        # the final pose instead of falling back to the first keyframe.
        if t >= self.interp_sch[-1][0]:
            ref = self.interp_sch[-1][1]
        return ref

    def act(self, o, t):
        """Compute target joint angles (radians) from observation o at time t.

        Args:
            o: 40d vector of joint positions and velocities.
            t: current time in seconds (indexes the keyframe schedule).
        Returns:
            20d target pose clipped to the simulated joint limits.
        """
        new_action = self.net.get_output(o)
        if self.discrete_action:
            # Map bin index {0..nvec-1} to a symmetric value in [-1, 1].
            new_action = new_action * 1.0 / np.floor(self.net.nvec / 2.0) - 1.0
        # Moving-average filter over the most recent actions.
        self.action_cache.append(new_action)
        if len(self.action_cache) > self.action_filter_size:
            self.action_cache.pop(0)
        filtered_action = np.mean(self.action_cache, axis=0)
        clamped_control = np.clip(filtered_action, -1, 1)
        if self.interp_sch is not None:
            # Keyframe reference plus a bounded network correction.
            self.ref_target = self._reference_pose(t)
            target_pose = (self.ref_target
                           + clamped_control * self.delta_angle_scale)
        else:
            # No schedule: map [-1, 1] directly onto the control range.
            target_pose = ((clamped_control + 1.0) / 2.0
                           * (SIM_CONTROL_UP_BOUND_RAD
                              - SIM_CONTROL_LOW_BOUND_RAD)
                           + SIM_CONTROL_LOW_BOUND_RAD)
        target_pose = np.clip(target_pose, SIM_JOINT_LOW_BOUND_RAD,
                              SIM_JOINT_UP_BOUND_RAD)
        return target_pose
def toRobot(positions):
    """Convert 20 joint angles (radians, simulation order) to Dynamixel ticks.

    Reorders from the simulation dof order to the hardware servo order and
    maps radians to integer servo values (0.088 deg per tick, 2048 = zero).
    Truncation toward zero matches the original per-element int() conversion.
    """
    # Simulation-dof -> hardware-servo reordering.
    index = [3, 0, 4, 1, 5, 2, 14, 8, 15, 9, 16, 10, 17, 11, 18, 12, 19, 13,
             6, 7]
    # Vectorized radians -> ticks conversion (replaces the per-joint loop).
    ticks = np.trunc(np.asarray(positions) * 180 * (1 / (np.pi * 0.088))
                     ).astype(int) + 2048
    return ticks[index]
if __name__ == '__main__':
    import pydart2 as pydart
    import gym

    # Use the dart_world from the gym environment to avoid copying the data.
    env = gym.make('DartDarwinSquat-v1')
    env.reset()
    dart_world = env.env.dart_world

    class Controller(object):
        """PD joint controller driven by the numpy policy at a fixed rate."""

        def __init__(self, world, policy):
            self.world = world
            self.target = None
            # Per-joint PD gains (simulation dof order).
            self.kp = np.array([2.1, 1.79, 4.93, 2.0, 2.02, 1.98, 2.2, 2.06,
                                148, 152, 150, 136, 153, 102, 151, 151.4,
                                150.45, 151.36, 154, 105.2])
            self.kd = np.array([0.21, 0.23, 0.22, 0.25, 0.21, 0.26, 0.28,
                                0.213, 0.192, 0.198, 0.22, 0.199, 0.02, 0.01,
                                0.53, 0.27, 0.21, 0.205, 0.022, 0.056])
            self.step = 0
            self.frameskip = 25  # query the policy once per 25 sim steps
            self.fulltau = np.zeros(26)
            self.np_policy = policy
            self.target_sim_cache = []
            self.target_hw_cache = []

        def compute(self):
            if self.step % self.frameskip == 0:
                o = np.concatenate([self.world.skeletons[-1].q[6:],
                                    self.world.skeletons[-1].dq[6:]])
                self.target = self.np_policy.act(o, self.world.time())
                self.target_hw_cache.append(toRobot(self.target))
                self.target_sim_cache.append(RADIAN2VAL(self.target))
                # BUGFIX: np.int was removed in NumPy 1.24; use the builtin.
                np.savetxt('darwin/feedforward_target_simindex.txt',
                           np.array(self.target_sim_cache, dtype=int))
                np.savetxt('darwin/feedforward_target_hwindex.txt',
                           np.array(self.target_hw_cache, dtype=int))
            tau = (-self.kp * (self.world.skeletons[-1].q[6:] - self.target)
                   - self.kd * self.world.skeletons[-1].dq[6:])
            self.fulltau = np.concatenate([np.zeros(6), tau])
            self.step += 1
            return np.clip(self.fulltau, -3.5, 3.5)  # torque limit of 3.5 Nm

    # Set joint damping.
    for i in range(6, dart_world.skeletons[-1].ndofs):
        j = dart_world.skeletons[-1].dof(i)
        j.set_damping_coefficient(0.515)
    dart_world.set_gravity([0, 0, -9.81])
    dart_world.skeletons[1].set_mobile(False)
    dart_world.skeletons[1].q = dart_world.skeletons[1].q + 100
    dart_world.set_collision_detector(0)
    dart_world.skeletons[-1].set_self_collision_check(False)
    dart_world.skeletons[0].bodynodes[0].set_friction_coeff(5.0)
    for bn in dart_world.skeletons[-1].bodynodes:
        bn.set_friction_coeff(5.0)

    # Keyframe poses in raw servo values, converted to radians below.
    pose_squat_val = np.array([2509, 2297, 1714, 1508, 1816, 2376, 2047,
                               2171, 2032, 2039, 2795, 648, 1231, 2040, 2041,
                               2060, 1281, 3448, 2855, 2073])
    pose_stand_val = np.array([1500, 2048, 2048, 2500, 2048, 2048, 2048,
                               2048, 2048, 2048, 2048, 2048, 2048, 2048,
                               2048, 2048, 2048, 2048, 2048, 2048])
    pose_squat = VAL2RADIAN(pose_squat_val)
    pose_stand = VAL2RADIAN(pose_stand_val)
    # Keyframe scheduling for the squat-stand task.
    interp_sch = [[0.0, pose_squat], [3.0, pose_stand], [4.0, pose_stand]]
    # BUGFIX: action_filter_size is a required parameter and was missing
    # here, so this script raised TypeError; 5 is a plausible moving-average
    # window -- TODO confirm against the training configuration.
    policy = NP_Policy(interp_sch,
                       'data/darwin_standsquat_policy_conseq_obs_warmstart.pkl',
                       discrete_action=True, action_bins=np.array([11] * 20),
                       delta_angle_scale=0.3, action_filter_size=5)

    controller = Controller(dart_world, policy)
    dart_world.skeletons[-1].set_controller(controller)
    print('create controller OK')
    pydart.gui.viewer.launch(dart_world, default_camera=1)  # Z-up camera
<|reserved_special_token_1|>
################################################################################
# Controller of the Darwin Squat-Stand task using numpy #
# Note: all joint data used in this file uses the dof indexing with #
# from the simulation environment, not the hardware. #
################################################################################
import joblib
import numpy as np
from darwin.darwin_utils import *
# Class for a neural network model in numpy
class NP_Net:
    """Feed-forward policy network evaluated with plain numpy.

    Holds observation-normalisation statistics and a stack of affine
    layers loaded from a pickled TensorFlow parameter dump.
    """

    def __init__(self, nvec=None):
        # Running mean / std of observations (set by load_from_file).
        self.obrms_mean = None
        self.obrms_std = None
        # Layer parameters as [[W0, b0], [W1, b1], ..., [Wn, bn]].
        self.nn_params = []
        # None -> continuous output; otherwise bin counts per discrete action.
        self.nvec = nvec

    def load_from_file(self, fname):
        """Load normalisation stats and layer weights from a joblib dump."""
        params = joblib.load(fname)
        first_key = list(params.keys())[0]
        pol_scope = first_key[:first_key.find('/')]
        running_sum_sq = params[pol_scope + '/obfilter/runningsumsq:0']
        count = params[pol_scope + '/obfilter/count:0']
        running_sum = params[pol_scope + '/obfilter/runningsum:0']
        self.obrms_mean = running_sum / count
        variance = np.clip(running_sum_sq / count - self.obrms_mean ** 2,
                           1e-2, 1000000)
        self.obrms_std = np.sqrt(variance)
        for layer in range(10):  # assume at most 10 hidden layers
            w_key = pol_scope + '/pol/fc' + str(layer) + '/kernel:0'
            if w_key in params:
                b_key = pol_scope + '/pol/fc' + str(layer) + '/bias:0'
                self.nn_params.append([params[w_key], params[b_key]])
        self.nn_params.append([params[pol_scope + '/pol/final/kernel:0'],
                               params[pol_scope + '/pol/final/bias:0']])

    def get_output(self, input, activation=np.tanh):
        """Run one forward pass; returns raw action or per-bin argmax."""
        assert self.obrms_mean is not None
        hidden = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0, 5.0)
        for W, b in self.nn_params[:-1]:
            hidden = activation(np.dot(W.T, hidden) + b)
        W_out, b_out = self.nn_params[-1]
        out = np.dot(W_out.T, hidden) + b_out
        if self.nvec is None:
            return out
        # Discrete head: split logits per action dim and take the argmax bin.
        per_dim = np.split(out, np.cumsum(self.nvec)[0:-1])
        return np.array([np.argmax(logits) for logits in per_dim])
# Class for a neural network model with mirror symmetry in numpy
class NP_Net_MirrorSym:
def __init__(self, nvec = None, observation_permutation=None,action_permutation=None):
self.obrms_mean = None # for observation running mean std
self.obrms_std = None # for observation running mean std
self.nn_params = [] # stores the neural net parameters in the form of [[W0, b0], [W1, b1], ... [Wn, bn]]
self.nvec = nvec # None if continuous action, otherwise discrete action in the form of
# [numbins, numbins, ... numbins]
obs_perm_mat = np.zeros((len(observation_permutation), len(observation_permutation)), dtype=np.float32)
self.obs_perm_mat = obs_perm_mat
for i, perm in enumerate(observation_permutation):
obs_perm_mat[i][int(np.abs(perm))] = np.sign(perm)
if nvec is None:
act_perm_mat = np.zeros((len(action_permutation), len(action_permutation)), dtype=np.float32)
self.act_perm_mat = act_perm_mat
for i, perm in enumerate(action_permutation):
self.act_perm_mat[i][int(np.abs(perm))] = np.sign(perm)
else:
total_dim = int(np.sum(nvec))
dim_index = np.concatenate([[0], np.cumsum(nvec)])
act_perm_mat = np.zeros((total_dim, total_dim), dtype=np.float32)
self.act_perm_mat = act_perm_mat
for i, perm in enumerate(action_permutation):
perm_mat = np.identity(nvec[i])
if np.sign(perm) < 0:
perm_mat = np.flipud(perm_mat)
self.act_perm_mat[dim_index[i]:dim_index[i] + nvec[i],
dim_index[int(np.abs(perm))]:dim_index[int(np.abs(perm))] + nvec[int(np.abs(perm))]] = perm_mat
def load_from_file(self, fname):
params = joblib.load(fname)
pol_scope = list(params.keys())[0][0:list(params.keys())[0].find('/')]
obrms_runningsumsq = params[pol_scope+'/obfilter/runningsumsq:0']
obrms_count = params[pol_scope+'/obfilter/count:0']
obrms_runningsum = params[pol_scope+'/obfilter/runningsum:0']
self.obrms_mean = obrms_runningsum / obrms_count
self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count - (self.obrms_mean**2), 1e-2, 1000000))
for i in range(10): # assume maximum layer size of 10
if pol_scope+'/pol_net/genff'+str(i)+'/w:0' in params:
W = params[pol_scope+'/pol_net/genff'+str(i)+'/w:0']
b = params[pol_scope+'/pol_net/genff'+str(i)+'/b:0']
self.nn_params.append([W, b])
W_final = params[pol_scope + '/pol_net/genff_out/w:0']
b_final = params[pol_scope + '/pol_net/genff_out/b:0']
self.nn_params.append([W_final, b_final])
def get_output(self, input, activation = np.tanh):
assert self.obrms_mean is not None
last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0, 5.0)
for i in range(len(self.nn_params)-1):
last_out = activation(np.dot(self.nn_params[i][0].T, last_out) + self.nn_params[i][1])
out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]
mirrorlast_out = np.clip((np.dot(input, self.obs_perm_mat) - self.obrms_mean) / self.obrms_std, -5.0, 5.0)
for i in range(len(self.nn_params) - 1):
mirrorlast_out = activation(np.dot(self.nn_params[i][0].T, mirrorlast_out) + self.nn_params[i][1])
mirrorout = np.dot(np.dot(self.nn_params[-1][0].T, mirrorlast_out) + self.nn_params[-1][1], self.act_perm_mat)
if self.nvec is None:
return out + mirrorout
else:
# convert for discrete output
splitted_out = np.split(out + mirrorout, np.cumsum(self.nvec)[0:-1])
discrete_out = np.array([np.argmax(prob) for prob in splitted_out])
return discrete_out
# Class for a neural network policy in numpy
# Includes the action filtering and pose interpolation
class NP_Policy:
    """Numpy policy wrapper: network output plus keyframe feed-forward motion.

    Combines a neural-network correction with a time-based keyframe
    interpolation schedule (``interp_sch``) and low-pass filters the
    network actions with a moving average of size ``action_filter_size``.
    """

    def __init__(self, interp_sch, param_file, discrete_action, action_bins,
                 delta_angle_scale, action_filter_size=5, obs_perm=None,
                 act_perm=None):
        """Load the network parameters and configure action processing.

        Args:
            interp_sch: list of [time, pose] keyframes giving the timing and
                pose throughout the feed-forward trajectory, or None to run
                the mirror-symmetric network without a schedule.
            param_file: joblib dump of trained network parameters.
            discrete_action: True if the network outputs per-joint bins.
            action_bins: bin counts per joint (used when discrete_action).
            delta_angle_scale: radians of authority around the reference.
            action_filter_size: moving-average window over recent actions.
                BUGFIX: given a default (5) because the example driver in
                this file constructs NP_Policy without it, which raised
                TypeError -- TODO confirm the value used during training.
            obs_perm, act_perm: permutations for the mirror-symmetric net.
        """
        self.interp_sch = interp_sch
        self.obs_cache = []
        self.action_cache = []  # acts as the policy's short-term memory
        self.action_filter_size = action_filter_size
        if interp_sch is not None:
            self.net = NP_Net()
        else:
            self.net = NP_Net_MirrorSym(action_bins, obs_perm, act_perm)
        self.net.load_from_file(param_file)
        self.discrete_action = discrete_action
        self.delta_angle_scale = delta_angle_scale
        if discrete_action:
            self.net.nvec = action_bins

    def get_initial_state(self):
        """Return the 20d starting pose for the robot.

        NOTE(review): the no-schedule branch reads module-level
        pose_squat/pose_stand, which in this file are only defined when it
        runs as a script -- confirm they exist in the deployed module.
        """
        if self.interp_sch is not None:
            return self.interp_sch[0][1]
        else:
            return 0.5 * (pose_squat + pose_stand)

    def reset(self):
        """Clear the action cache (the policy's only internal state)."""
        self.action_cache = []

    def _reference_pose(self, t):
        """Interpolate the keyframe schedule at time t (seconds)."""
        ref = self.interp_sch[0][1]
        for i in range(len(self.interp_sch) - 1):
            t0, pose0 = self.interp_sch[i]
            t1, pose1 = self.interp_sch[i + 1]
            if t0 <= t < t1:
                ratio = (t - t0) / (t1 - t0)
                ref = ratio * pose1 + (1 - ratio) * pose0
        # BUGFIX: use >= so that t exactly at the final keyframe time holds
        # the final pose instead of falling back to the first keyframe.
        if t >= self.interp_sch[-1][0]:
            ref = self.interp_sch[-1][1]
        return ref

    def act(self, o, t):
        """Compute target joint angles (radians) from observation o at time t.

        Args:
            o: 40d vector containing the pose and velocity of the robot.
            t: current time in seconds, used to index the reference schedule.
        Returns:
            20d target pose clipped to the simulated joint limits.
        """
        new_action = self.net.get_output(o)
        if self.discrete_action:
            # Map bin index {0..nvec-1} to a symmetric value in [-1, 1].
            new_action = new_action * 1.0 / np.floor(self.net.nvec / 2.0) - 1.0
        # Moving-average filter over the most recent actions.
        self.action_cache.append(new_action)
        if len(self.action_cache) > self.action_filter_size:
            self.action_cache.pop(0)
        filtered_action = np.mean(self.action_cache, axis=0)
        clamped_control = np.clip(filtered_action, -1, 1)
        if self.interp_sch is not None:
            # Keyframe reference plus a bounded network correction.
            self.ref_target = self._reference_pose(t)
            target_pose = (self.ref_target
                           + clamped_control * self.delta_angle_scale)
        else:
            # No schedule: map [-1, 1] directly onto the control range.
            target_pose = ((clamped_control + 1.0) / 2.0
                           * (SIM_CONTROL_UP_BOUND_RAD
                              - SIM_CONTROL_LOW_BOUND_RAD)
                           + SIM_CONTROL_LOW_BOUND_RAD)
        target_pose = np.clip(target_pose, SIM_JOINT_LOW_BOUND_RAD,
                              SIM_JOINT_UP_BOUND_RAD)
        return target_pose
def toRobot(positions):
    """Convert 20 joint angles (radians, simulation order) to Dynamixel ticks.

    Reorders from the simulation dof order to the hardware servo order and
    maps radians to integer servo values (0.088 deg per tick, 2048 = zero).
    Truncation toward zero matches the original per-element int() conversion.
    """
    # Simulation-dof -> hardware-servo reordering.
    index = [3, 0, 4, 1, 5, 2, 14, 8, 15, 9, 16, 10, 17, 11, 18, 12, 19, 13,
             6, 7]
    # Vectorized radians -> ticks conversion (replaces the per-joint loop).
    ticks = np.trunc(np.asarray(positions) * 180 * (1 / (np.pi * 0.088))
                     ).astype(int) + 2048
    return ticks[index]
#######################################
# test the file in pydart2 simulation #
#######################################
if __name__ == "__main__":
    import pydart2 as pydart
    import gym

    # Use the dart_world in the gym environment to avoid copying the data.
    env = gym.make('DartDarwinSquat-v1')
    env.reset()
    dart_world = env.env.dart_world

    class Controller(object):
        """PD joint controller driven by the numpy policy at a fixed rate."""

        def __init__(self, world, policy):
            self.world = world
            self.target = None
            # Per-joint PD gains (simulation dof order).
            self.kp = np.array([2.1, 1.79, 4.93, 2.0, 2.02, 1.98, 2.2, 2.06,
                                148, 152, 150, 136, 153, 102, 151, 151.4,
                                150.45, 151.36, 154, 105.2])
            self.kd = np.array([0.21, 0.23, 0.22, 0.25, 0.21, 0.26, 0.28,
                                0.213, 0.192, 0.198, 0.22, 0.199, 0.02, 0.01,
                                0.53, 0.27, 0.21, 0.205, 0.022, 0.056])
            self.step = 0
            self.frameskip = 25  # query the policy once per 25 sim steps
            self.fulltau = np.zeros(26)
            self.np_policy = policy
            self.target_sim_cache = []
            self.target_hw_cache = []

        def compute(self):
            if self.step % self.frameskip == 0:
                o = np.concatenate([self.world.skeletons[-1].q[6:],
                                    self.world.skeletons[-1].dq[6:]])
                self.target = self.np_policy.act(o, self.world.time())
                self.target_hw_cache.append(toRobot(self.target))
                self.target_sim_cache.append(RADIAN2VAL(self.target))
                # BUGFIX: np.int was removed in NumPy 1.24; use the builtin.
                np.savetxt('darwin/feedforward_target_simindex.txt',
                           np.array(self.target_sim_cache, dtype=int))
                np.savetxt('darwin/feedforward_target_hwindex.txt',
                           np.array(self.target_hw_cache, dtype=int))
            tau = (-self.kp * (self.world.skeletons[-1].q[6:] - self.target)
                   - self.kd * self.world.skeletons[-1].dq[6:])
            self.fulltau = np.concatenate([np.zeros(6), tau])
            self.step += 1
            return np.clip(self.fulltau, -3.5, 3.5)  # torque limit of 3.5 Nm

    # Set joint damping.
    for i in range(6, dart_world.skeletons[-1].ndofs):
        j = dart_world.skeletons[-1].dof(i)
        j.set_damping_coefficient(0.515)

    dart_world.set_gravity([0, 0, -9.81])
    dart_world.skeletons[1].set_mobile(False)
    dart_world.skeletons[1].q = dart_world.skeletons[1].q + 100
    dart_world.set_collision_detector(0)
    dart_world.skeletons[-1].set_self_collision_check(False)
    dart_world.skeletons[0].bodynodes[0].set_friction_coeff(5.0)
    for bn in dart_world.skeletons[-1].bodynodes:
        bn.set_friction_coeff(5.0)

    ############################################################################
    #### Setup the policy from file                                         ####
    #### refer to this part for construction of policy to be run on hardware ##
    ############################################################################
    pose_squat_val = np.array([2509, 2297, 1714, 1508, 1816, 2376, 2047,
                               2171, 2032, 2039, 2795, 648, 1231, 2040, 2041,
                               2060, 1281, 3448, 2855, 2073])
    pose_stand_val = np.array([1500, 2048, 2048, 2500, 2048, 2048, 2048,
                               2048, 2048, 2048, 2048, 2048, 2048, 2048,
                               2048, 2048, 2048, 2048, 2048, 2048])
    pose_squat = VAL2RADIAN(pose_squat_val)
    pose_stand = VAL2RADIAN(pose_stand_val)

    # Keyframe scheduling for the squat-stand task.
    interp_sch = [[0.0, pose_squat],
                  [3.0, pose_stand],
                  [4.0, pose_stand],
                  ]
    # BUGFIX: action_filter_size is a required parameter and was missing
    # here, so this script raised TypeError; 5 is a plausible moving-average
    # window -- TODO confirm against the training configuration.
    policy = NP_Policy(interp_sch,
                       'data/darwin_standsquat_policy_conseq_obs_warmstart.pkl',
                       discrete_action=True, action_bins=np.array([11] * 20),
                       delta_angle_scale=0.3, action_filter_size=5)
    ############################################################################
    # End of setup for policy
    # policy should be used for executing on other environments
    ############################################################################

    # Initialize the controller.
    controller = Controller(dart_world, policy)
    dart_world.skeletons[-1].set_controller(controller)
    print('create controller OK')

    pydart.gui.viewer.launch(dart_world,
                             default_camera=1)  # Use Z-up camera
|
flexible
|
{
"blob_id": "97c5b75323bb143c87972b389e2f27e443c1e00c",
"index": 9945,
"step-1": "<mask token>\n\n\nclass NP_Net_MirrorSym:\n <mask token>\n\n def load_from_file(self, fname):\n params = joblib.load(fname)\n pol_scope = list(params.keys())[0][0:list(params.keys())[0].find('/')]\n obrms_runningsumsq = params[pol_scope + '/obfilter/runningsumsq:0']\n obrms_count = params[pol_scope + '/obfilter/count:0']\n obrms_runningsum = params[pol_scope + '/obfilter/runningsum:0']\n self.obrms_mean = obrms_runningsum / obrms_count\n self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count -\n self.obrms_mean ** 2, 0.01, 1000000))\n for i in range(10):\n if pol_scope + '/pol_net/genff' + str(i) + '/w:0' in params:\n W = params[pol_scope + '/pol_net/genff' + str(i) + '/w:0']\n b = params[pol_scope + '/pol_net/genff' + str(i) + '/b:0']\n self.nn_params.append([W, b])\n W_final = params[pol_scope + '/pol_net/genff_out/w:0']\n b_final = params[pol_scope + '/pol_net/genff_out/b:0']\n self.nn_params.append([W_final, b_final])\n\n def get_output(self, input, activation=np.tanh):\n assert self.obrms_mean is not None\n last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0,\n 5.0)\n for i in range(len(self.nn_params) - 1):\n last_out = activation(np.dot(self.nn_params[i][0].T, last_out) +\n self.nn_params[i][1])\n out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]\n mirrorlast_out = np.clip((np.dot(input, self.obs_perm_mat) - self.\n obrms_mean) / self.obrms_std, -5.0, 5.0)\n for i in range(len(self.nn_params) - 1):\n mirrorlast_out = activation(np.dot(self.nn_params[i][0].T,\n mirrorlast_out) + self.nn_params[i][1])\n mirrorout = np.dot(np.dot(self.nn_params[-1][0].T, mirrorlast_out) +\n self.nn_params[-1][1], self.act_perm_mat)\n if self.nvec is None:\n return out + mirrorout\n else:\n splitted_out = np.split(out + mirrorout, np.cumsum(self.nvec)[0:-1]\n )\n discrete_out = np.array([np.argmax(prob) for prob in splitted_out])\n return discrete_out\n\n\nclass NP_Policy:\n\n def __init__(self, interp_sch, param_file, 
discrete_action, action_bins,\n delta_angle_scale, action_filter_size, obs_perm=None, act_perm=None):\n self.interp_sch = interp_sch\n self.obs_cache = []\n self.action_cache = []\n self.action_filter_size = action_filter_size\n if interp_sch is not None:\n self.net = NP_Net()\n else:\n self.net = NP_Net_MirrorSym(action_bins, obs_perm, act_perm)\n self.net.load_from_file(param_file)\n self.discrete_action = discrete_action\n self.delta_angle_scale = delta_angle_scale\n if discrete_action:\n self.net.nvec = action_bins\n\n def get_initial_state(self):\n if self.interp_sch is not None:\n return self.interp_sch[0][1]\n else:\n return 0.5 * (pose_squat + pose_stand)\n\n def reset(self):\n self.action_cache = []\n\n def act(self, o, t):\n new_action = self.net.get_output(o)\n if self.discrete_action:\n new_action = new_action * 1.0 / np.floor(self.net.nvec / 2.0) - 1.0\n self.action_cache.append(new_action)\n if len(self.action_cache) > self.action_filter_size:\n self.action_cache.pop(0)\n filtered_action = np.mean(self.action_cache, axis=0)\n clamped_control = np.clip(filtered_action, -1, 1)\n if self.interp_sch is not None:\n self.ref_target = self.interp_sch[0][1]\n for i in range(len(self.interp_sch) - 1):\n if t >= self.interp_sch[i][0] and t < self.interp_sch[i + 1][0\n ]:\n ratio = (t - self.interp_sch[i][0]) / (self.interp_sch[\n i + 1][0] - self.interp_sch[i][0])\n self.ref_target = ratio * self.interp_sch[i + 1][1] + (\n 1 - ratio) * self.interp_sch[i][1]\n if t > self.interp_sch[-1][0]:\n self.ref_target = self.interp_sch[-1][1]\n target_pose = (self.ref_target + clamped_control * self.\n delta_angle_scale)\n else:\n target_pose = (clamped_control + 1.0) / 2.0 * (\n SIM_CONTROL_UP_BOUND_RAD - SIM_CONTROL_LOW_BOUND_RAD\n ) + SIM_CONTROL_LOW_BOUND_RAD\n target_pose = np.clip(target_pose, SIM_JOINT_LOW_BOUND_RAD,\n SIM_JOINT_UP_BOUND_RAD)\n return target_pose\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass NP_Net:\n\n def __init__(self, nvec=None):\n self.obrms_mean = None\n self.obrms_std = None\n self.nn_params = []\n self.nvec = nvec\n <mask token>\n\n def get_output(self, input, activation=np.tanh):\n assert self.obrms_mean is not None\n last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0,\n 5.0)\n for i in range(len(self.nn_params) - 1):\n last_out = activation(np.dot(self.nn_params[i][0].T, last_out) +\n self.nn_params[i][1])\n out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]\n if self.nvec is None:\n return out\n else:\n splitted_out = np.split(out, np.cumsum(self.nvec)[0:-1])\n discrete_out = np.array([np.argmax(prob) for prob in splitted_out])\n return discrete_out\n\n\nclass NP_Net_MirrorSym:\n\n def __init__(self, nvec=None, observation_permutation=None,\n action_permutation=None):\n self.obrms_mean = None\n self.obrms_std = None\n self.nn_params = []\n self.nvec = nvec\n obs_perm_mat = np.zeros((len(observation_permutation), len(\n observation_permutation)), dtype=np.float32)\n self.obs_perm_mat = obs_perm_mat\n for i, perm in enumerate(observation_permutation):\n obs_perm_mat[i][int(np.abs(perm))] = np.sign(perm)\n if nvec is None:\n act_perm_mat = np.zeros((len(action_permutation), len(\n action_permutation)), dtype=np.float32)\n self.act_perm_mat = act_perm_mat\n for i, perm in enumerate(action_permutation):\n self.act_perm_mat[i][int(np.abs(perm))] = np.sign(perm)\n else:\n total_dim = int(np.sum(nvec))\n dim_index = np.concatenate([[0], np.cumsum(nvec)])\n act_perm_mat = np.zeros((total_dim, total_dim), dtype=np.float32)\n self.act_perm_mat = act_perm_mat\n for i, perm in enumerate(action_permutation):\n perm_mat = np.identity(nvec[i])\n if np.sign(perm) < 0:\n perm_mat = np.flipud(perm_mat)\n self.act_perm_mat[dim_index[i]:dim_index[i] + nvec[i],\n dim_index[int(np.abs(perm))]:dim_index[int(np.abs(perm)\n )] + nvec[int(np.abs(perm))]] = perm_mat\n\n def load_from_file(self, 
fname):\n params = joblib.load(fname)\n pol_scope = list(params.keys())[0][0:list(params.keys())[0].find('/')]\n obrms_runningsumsq = params[pol_scope + '/obfilter/runningsumsq:0']\n obrms_count = params[pol_scope + '/obfilter/count:0']\n obrms_runningsum = params[pol_scope + '/obfilter/runningsum:0']\n self.obrms_mean = obrms_runningsum / obrms_count\n self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count -\n self.obrms_mean ** 2, 0.01, 1000000))\n for i in range(10):\n if pol_scope + '/pol_net/genff' + str(i) + '/w:0' in params:\n W = params[pol_scope + '/pol_net/genff' + str(i) + '/w:0']\n b = params[pol_scope + '/pol_net/genff' + str(i) + '/b:0']\n self.nn_params.append([W, b])\n W_final = params[pol_scope + '/pol_net/genff_out/w:0']\n b_final = params[pol_scope + '/pol_net/genff_out/b:0']\n self.nn_params.append([W_final, b_final])\n\n def get_output(self, input, activation=np.tanh):\n assert self.obrms_mean is not None\n last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0,\n 5.0)\n for i in range(len(self.nn_params) - 1):\n last_out = activation(np.dot(self.nn_params[i][0].T, last_out) +\n self.nn_params[i][1])\n out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]\n mirrorlast_out = np.clip((np.dot(input, self.obs_perm_mat) - self.\n obrms_mean) / self.obrms_std, -5.0, 5.0)\n for i in range(len(self.nn_params) - 1):\n mirrorlast_out = activation(np.dot(self.nn_params[i][0].T,\n mirrorlast_out) + self.nn_params[i][1])\n mirrorout = np.dot(np.dot(self.nn_params[-1][0].T, mirrorlast_out) +\n self.nn_params[-1][1], self.act_perm_mat)\n if self.nvec is None:\n return out + mirrorout\n else:\n splitted_out = np.split(out + mirrorout, np.cumsum(self.nvec)[0:-1]\n )\n discrete_out = np.array([np.argmax(prob) for prob in splitted_out])\n return discrete_out\n\n\nclass NP_Policy:\n\n def __init__(self, interp_sch, param_file, discrete_action, action_bins,\n delta_angle_scale, action_filter_size, obs_perm=None, 
act_perm=None):\n self.interp_sch = interp_sch\n self.obs_cache = []\n self.action_cache = []\n self.action_filter_size = action_filter_size\n if interp_sch is not None:\n self.net = NP_Net()\n else:\n self.net = NP_Net_MirrorSym(action_bins, obs_perm, act_perm)\n self.net.load_from_file(param_file)\n self.discrete_action = discrete_action\n self.delta_angle_scale = delta_angle_scale\n if discrete_action:\n self.net.nvec = action_bins\n\n def get_initial_state(self):\n if self.interp_sch is not None:\n return self.interp_sch[0][1]\n else:\n return 0.5 * (pose_squat + pose_stand)\n\n def reset(self):\n self.action_cache = []\n\n def act(self, o, t):\n new_action = self.net.get_output(o)\n if self.discrete_action:\n new_action = new_action * 1.0 / np.floor(self.net.nvec / 2.0) - 1.0\n self.action_cache.append(new_action)\n if len(self.action_cache) > self.action_filter_size:\n self.action_cache.pop(0)\n filtered_action = np.mean(self.action_cache, axis=0)\n clamped_control = np.clip(filtered_action, -1, 1)\n if self.interp_sch is not None:\n self.ref_target = self.interp_sch[0][1]\n for i in range(len(self.interp_sch) - 1):\n if t >= self.interp_sch[i][0] and t < self.interp_sch[i + 1][0\n ]:\n ratio = (t - self.interp_sch[i][0]) / (self.interp_sch[\n i + 1][0] - self.interp_sch[i][0])\n self.ref_target = ratio * self.interp_sch[i + 1][1] + (\n 1 - ratio) * self.interp_sch[i][1]\n if t > self.interp_sch[-1][0]:\n self.ref_target = self.interp_sch[-1][1]\n target_pose = (self.ref_target + clamped_control * self.\n delta_angle_scale)\n else:\n target_pose = (clamped_control + 1.0) / 2.0 * (\n SIM_CONTROL_UP_BOUND_RAD - SIM_CONTROL_LOW_BOUND_RAD\n ) + SIM_CONTROL_LOW_BOUND_RAD\n target_pose = np.clip(target_pose, SIM_JOINT_LOW_BOUND_RAD,\n SIM_JOINT_UP_BOUND_RAD)\n return target_pose\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass NP_Net:\n\n def __init__(self, nvec=None):\n self.obrms_mean = None\n self.obrms_std = None\n self.nn_params = []\n self.nvec = nvec\n\n def load_from_file(self, fname):\n params = joblib.load(fname)\n pol_scope = list(params.keys())[0][0:list(params.keys())[0].find('/')]\n obrms_runningsumsq = params[pol_scope + '/obfilter/runningsumsq:0']\n obrms_count = params[pol_scope + '/obfilter/count:0']\n obrms_runningsum = params[pol_scope + '/obfilter/runningsum:0']\n self.obrms_mean = obrms_runningsum / obrms_count\n self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count -\n self.obrms_mean ** 2, 0.01, 1000000))\n for i in range(10):\n if pol_scope + '/pol/fc' + str(i) + '/kernel:0' in params:\n W = params[pol_scope + '/pol/fc' + str(i) + '/kernel:0']\n b = params[pol_scope + '/pol/fc' + str(i) + '/bias:0']\n self.nn_params.append([W, b])\n W_final = params[pol_scope + '/pol/final/kernel:0']\n b_final = params[pol_scope + '/pol/final/bias:0']\n self.nn_params.append([W_final, b_final])\n\n def get_output(self, input, activation=np.tanh):\n assert self.obrms_mean is not None\n last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0,\n 5.0)\n for i in range(len(self.nn_params) - 1):\n last_out = activation(np.dot(self.nn_params[i][0].T, last_out) +\n self.nn_params[i][1])\n out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]\n if self.nvec is None:\n return out\n else:\n splitted_out = np.split(out, np.cumsum(self.nvec)[0:-1])\n discrete_out = np.array([np.argmax(prob) for prob in splitted_out])\n return discrete_out\n\n\nclass NP_Net_MirrorSym:\n\n def __init__(self, nvec=None, observation_permutation=None,\n action_permutation=None):\n self.obrms_mean = None\n self.obrms_std = None\n self.nn_params = []\n self.nvec = nvec\n obs_perm_mat = np.zeros((len(observation_permutation), len(\n observation_permutation)), dtype=np.float32)\n self.obs_perm_mat = obs_perm_mat\n for i, perm in 
enumerate(observation_permutation):\n obs_perm_mat[i][int(np.abs(perm))] = np.sign(perm)\n if nvec is None:\n act_perm_mat = np.zeros((len(action_permutation), len(\n action_permutation)), dtype=np.float32)\n self.act_perm_mat = act_perm_mat\n for i, perm in enumerate(action_permutation):\n self.act_perm_mat[i][int(np.abs(perm))] = np.sign(perm)\n else:\n total_dim = int(np.sum(nvec))\n dim_index = np.concatenate([[0], np.cumsum(nvec)])\n act_perm_mat = np.zeros((total_dim, total_dim), dtype=np.float32)\n self.act_perm_mat = act_perm_mat\n for i, perm in enumerate(action_permutation):\n perm_mat = np.identity(nvec[i])\n if np.sign(perm) < 0:\n perm_mat = np.flipud(perm_mat)\n self.act_perm_mat[dim_index[i]:dim_index[i] + nvec[i],\n dim_index[int(np.abs(perm))]:dim_index[int(np.abs(perm)\n )] + nvec[int(np.abs(perm))]] = perm_mat\n\n def load_from_file(self, fname):\n params = joblib.load(fname)\n pol_scope = list(params.keys())[0][0:list(params.keys())[0].find('/')]\n obrms_runningsumsq = params[pol_scope + '/obfilter/runningsumsq:0']\n obrms_count = params[pol_scope + '/obfilter/count:0']\n obrms_runningsum = params[pol_scope + '/obfilter/runningsum:0']\n self.obrms_mean = obrms_runningsum / obrms_count\n self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count -\n self.obrms_mean ** 2, 0.01, 1000000))\n for i in range(10):\n if pol_scope + '/pol_net/genff' + str(i) + '/w:0' in params:\n W = params[pol_scope + '/pol_net/genff' + str(i) + '/w:0']\n b = params[pol_scope + '/pol_net/genff' + str(i) + '/b:0']\n self.nn_params.append([W, b])\n W_final = params[pol_scope + '/pol_net/genff_out/w:0']\n b_final = params[pol_scope + '/pol_net/genff_out/b:0']\n self.nn_params.append([W_final, b_final])\n\n def get_output(self, input, activation=np.tanh):\n assert self.obrms_mean is not None\n last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0,\n 5.0)\n for i in range(len(self.nn_params) - 1):\n last_out = activation(np.dot(self.nn_params[i][0].T, 
last_out) +\n self.nn_params[i][1])\n out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]\n mirrorlast_out = np.clip((np.dot(input, self.obs_perm_mat) - self.\n obrms_mean) / self.obrms_std, -5.0, 5.0)\n for i in range(len(self.nn_params) - 1):\n mirrorlast_out = activation(np.dot(self.nn_params[i][0].T,\n mirrorlast_out) + self.nn_params[i][1])\n mirrorout = np.dot(np.dot(self.nn_params[-1][0].T, mirrorlast_out) +\n self.nn_params[-1][1], self.act_perm_mat)\n if self.nvec is None:\n return out + mirrorout\n else:\n splitted_out = np.split(out + mirrorout, np.cumsum(self.nvec)[0:-1]\n )\n discrete_out = np.array([np.argmax(prob) for prob in splitted_out])\n return discrete_out\n\n\nclass NP_Policy:\n\n def __init__(self, interp_sch, param_file, discrete_action, action_bins,\n delta_angle_scale, action_filter_size, obs_perm=None, act_perm=None):\n self.interp_sch = interp_sch\n self.obs_cache = []\n self.action_cache = []\n self.action_filter_size = action_filter_size\n if interp_sch is not None:\n self.net = NP_Net()\n else:\n self.net = NP_Net_MirrorSym(action_bins, obs_perm, act_perm)\n self.net.load_from_file(param_file)\n self.discrete_action = discrete_action\n self.delta_angle_scale = delta_angle_scale\n if discrete_action:\n self.net.nvec = action_bins\n\n def get_initial_state(self):\n if self.interp_sch is not None:\n return self.interp_sch[0][1]\n else:\n return 0.5 * (pose_squat + pose_stand)\n\n def reset(self):\n self.action_cache = []\n\n def act(self, o, t):\n new_action = self.net.get_output(o)\n if self.discrete_action:\n new_action = new_action * 1.0 / np.floor(self.net.nvec / 2.0) - 1.0\n self.action_cache.append(new_action)\n if len(self.action_cache) > self.action_filter_size:\n self.action_cache.pop(0)\n filtered_action = np.mean(self.action_cache, axis=0)\n clamped_control = np.clip(filtered_action, -1, 1)\n if self.interp_sch is not None:\n self.ref_target = self.interp_sch[0][1]\n for i in range(len(self.interp_sch) - 
1):\n if t >= self.interp_sch[i][0] and t < self.interp_sch[i + 1][0\n ]:\n ratio = (t - self.interp_sch[i][0]) / (self.interp_sch[\n i + 1][0] - self.interp_sch[i][0])\n self.ref_target = ratio * self.interp_sch[i + 1][1] + (\n 1 - ratio) * self.interp_sch[i][1]\n if t > self.interp_sch[-1][0]:\n self.ref_target = self.interp_sch[-1][1]\n target_pose = (self.ref_target + clamped_control * self.\n delta_angle_scale)\n else:\n target_pose = (clamped_control + 1.0) / 2.0 * (\n SIM_CONTROL_UP_BOUND_RAD - SIM_CONTROL_LOW_BOUND_RAD\n ) + SIM_CONTROL_LOW_BOUND_RAD\n target_pose = np.clip(target_pose, SIM_JOINT_LOW_BOUND_RAD,\n SIM_JOINT_UP_BOUND_RAD)\n return target_pose\n\n\ndef toRobot(positions):\n index = [3, 0, 4, 1, 5, 2, 14, 8, 15, 9, 16, 10, 17, 11, 18, 12, 19, 13,\n 6, 7]\n robotState = np.zeros(len(positions))\n for i in range(len(positions)):\n robotState[i] = int(positions[i] * 180 * (1 / (np.pi * 0.088))) + 2048\n return robotState[index].astype(int)\n\n\n<mask token>\n",
"step-4": "import joblib\nimport numpy as np\nfrom darwin.darwin_utils import *\n\n\nclass NP_Net:\n\n def __init__(self, nvec=None):\n self.obrms_mean = None\n self.obrms_std = None\n self.nn_params = []\n self.nvec = nvec\n\n def load_from_file(self, fname):\n params = joblib.load(fname)\n pol_scope = list(params.keys())[0][0:list(params.keys())[0].find('/')]\n obrms_runningsumsq = params[pol_scope + '/obfilter/runningsumsq:0']\n obrms_count = params[pol_scope + '/obfilter/count:0']\n obrms_runningsum = params[pol_scope + '/obfilter/runningsum:0']\n self.obrms_mean = obrms_runningsum / obrms_count\n self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count -\n self.obrms_mean ** 2, 0.01, 1000000))\n for i in range(10):\n if pol_scope + '/pol/fc' + str(i) + '/kernel:0' in params:\n W = params[pol_scope + '/pol/fc' + str(i) + '/kernel:0']\n b = params[pol_scope + '/pol/fc' + str(i) + '/bias:0']\n self.nn_params.append([W, b])\n W_final = params[pol_scope + '/pol/final/kernel:0']\n b_final = params[pol_scope + '/pol/final/bias:0']\n self.nn_params.append([W_final, b_final])\n\n def get_output(self, input, activation=np.tanh):\n assert self.obrms_mean is not None\n last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0,\n 5.0)\n for i in range(len(self.nn_params) - 1):\n last_out = activation(np.dot(self.nn_params[i][0].T, last_out) +\n self.nn_params[i][1])\n out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]\n if self.nvec is None:\n return out\n else:\n splitted_out = np.split(out, np.cumsum(self.nvec)[0:-1])\n discrete_out = np.array([np.argmax(prob) for prob in splitted_out])\n return discrete_out\n\n\nclass NP_Net_MirrorSym:\n\n def __init__(self, nvec=None, observation_permutation=None,\n action_permutation=None):\n self.obrms_mean = None\n self.obrms_std = None\n self.nn_params = []\n self.nvec = nvec\n obs_perm_mat = np.zeros((len(observation_permutation), len(\n observation_permutation)), dtype=np.float32)\n 
self.obs_perm_mat = obs_perm_mat\n for i, perm in enumerate(observation_permutation):\n obs_perm_mat[i][int(np.abs(perm))] = np.sign(perm)\n if nvec is None:\n act_perm_mat = np.zeros((len(action_permutation), len(\n action_permutation)), dtype=np.float32)\n self.act_perm_mat = act_perm_mat\n for i, perm in enumerate(action_permutation):\n self.act_perm_mat[i][int(np.abs(perm))] = np.sign(perm)\n else:\n total_dim = int(np.sum(nvec))\n dim_index = np.concatenate([[0], np.cumsum(nvec)])\n act_perm_mat = np.zeros((total_dim, total_dim), dtype=np.float32)\n self.act_perm_mat = act_perm_mat\n for i, perm in enumerate(action_permutation):\n perm_mat = np.identity(nvec[i])\n if np.sign(perm) < 0:\n perm_mat = np.flipud(perm_mat)\n self.act_perm_mat[dim_index[i]:dim_index[i] + nvec[i],\n dim_index[int(np.abs(perm))]:dim_index[int(np.abs(perm)\n )] + nvec[int(np.abs(perm))]] = perm_mat\n\n def load_from_file(self, fname):\n params = joblib.load(fname)\n pol_scope = list(params.keys())[0][0:list(params.keys())[0].find('/')]\n obrms_runningsumsq = params[pol_scope + '/obfilter/runningsumsq:0']\n obrms_count = params[pol_scope + '/obfilter/count:0']\n obrms_runningsum = params[pol_scope + '/obfilter/runningsum:0']\n self.obrms_mean = obrms_runningsum / obrms_count\n self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count -\n self.obrms_mean ** 2, 0.01, 1000000))\n for i in range(10):\n if pol_scope + '/pol_net/genff' + str(i) + '/w:0' in params:\n W = params[pol_scope + '/pol_net/genff' + str(i) + '/w:0']\n b = params[pol_scope + '/pol_net/genff' + str(i) + '/b:0']\n self.nn_params.append([W, b])\n W_final = params[pol_scope + '/pol_net/genff_out/w:0']\n b_final = params[pol_scope + '/pol_net/genff_out/b:0']\n self.nn_params.append([W_final, b_final])\n\n def get_output(self, input, activation=np.tanh):\n assert self.obrms_mean is not None\n last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0,\n 5.0)\n for i in range(len(self.nn_params) - 1):\n 
last_out = activation(np.dot(self.nn_params[i][0].T, last_out) +\n self.nn_params[i][1])\n out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]\n mirrorlast_out = np.clip((np.dot(input, self.obs_perm_mat) - self.\n obrms_mean) / self.obrms_std, -5.0, 5.0)\n for i in range(len(self.nn_params) - 1):\n mirrorlast_out = activation(np.dot(self.nn_params[i][0].T,\n mirrorlast_out) + self.nn_params[i][1])\n mirrorout = np.dot(np.dot(self.nn_params[-1][0].T, mirrorlast_out) +\n self.nn_params[-1][1], self.act_perm_mat)\n if self.nvec is None:\n return out + mirrorout\n else:\n splitted_out = np.split(out + mirrorout, np.cumsum(self.nvec)[0:-1]\n )\n discrete_out = np.array([np.argmax(prob) for prob in splitted_out])\n return discrete_out\n\n\nclass NP_Policy:\n\n def __init__(self, interp_sch, param_file, discrete_action, action_bins,\n delta_angle_scale, action_filter_size, obs_perm=None, act_perm=None):\n self.interp_sch = interp_sch\n self.obs_cache = []\n self.action_cache = []\n self.action_filter_size = action_filter_size\n if interp_sch is not None:\n self.net = NP_Net()\n else:\n self.net = NP_Net_MirrorSym(action_bins, obs_perm, act_perm)\n self.net.load_from_file(param_file)\n self.discrete_action = discrete_action\n self.delta_angle_scale = delta_angle_scale\n if discrete_action:\n self.net.nvec = action_bins\n\n def get_initial_state(self):\n if self.interp_sch is not None:\n return self.interp_sch[0][1]\n else:\n return 0.5 * (pose_squat + pose_stand)\n\n def reset(self):\n self.action_cache = []\n\n def act(self, o, t):\n new_action = self.net.get_output(o)\n if self.discrete_action:\n new_action = new_action * 1.0 / np.floor(self.net.nvec / 2.0) - 1.0\n self.action_cache.append(new_action)\n if len(self.action_cache) > self.action_filter_size:\n self.action_cache.pop(0)\n filtered_action = np.mean(self.action_cache, axis=0)\n clamped_control = np.clip(filtered_action, -1, 1)\n if self.interp_sch is not None:\n self.ref_target = 
self.interp_sch[0][1]\n for i in range(len(self.interp_sch) - 1):\n if t >= self.interp_sch[i][0] and t < self.interp_sch[i + 1][0\n ]:\n ratio = (t - self.interp_sch[i][0]) / (self.interp_sch[\n i + 1][0] - self.interp_sch[i][0])\n self.ref_target = ratio * self.interp_sch[i + 1][1] + (\n 1 - ratio) * self.interp_sch[i][1]\n if t > self.interp_sch[-1][0]:\n self.ref_target = self.interp_sch[-1][1]\n target_pose = (self.ref_target + clamped_control * self.\n delta_angle_scale)\n else:\n target_pose = (clamped_control + 1.0) / 2.0 * (\n SIM_CONTROL_UP_BOUND_RAD - SIM_CONTROL_LOW_BOUND_RAD\n ) + SIM_CONTROL_LOW_BOUND_RAD\n target_pose = np.clip(target_pose, SIM_JOINT_LOW_BOUND_RAD,\n SIM_JOINT_UP_BOUND_RAD)\n return target_pose\n\n\ndef toRobot(positions):\n index = [3, 0, 4, 1, 5, 2, 14, 8, 15, 9, 16, 10, 17, 11, 18, 12, 19, 13,\n 6, 7]\n robotState = np.zeros(len(positions))\n for i in range(len(positions)):\n robotState[i] = int(positions[i] * 180 * (1 / (np.pi * 0.088))) + 2048\n return robotState[index].astype(int)\n\n\nif __name__ == '__main__':\n import pydart2 as pydart\n import gym\n env = gym.make('DartDarwinSquat-v1')\n env.reset()\n dart_world = env.env.dart_world\n\n\n class Controller(object):\n\n def __init__(self, world, policy):\n self.world = world\n self.target = None\n self.kp = np.array([2.1, 1.79, 4.93, 2.0, 2.02, 1.98, 2.2, 2.06,\n 148, 152, 150, 136, 153, 102, 151, 151.4, 150.45, 151.36, \n 154, 105.2])\n self.kd = np.array([0.21, 0.23, 0.22, 0.25, 0.21, 0.26, 0.28, \n 0.213, 0.192, 0.198, 0.22, 0.199, 0.02, 0.01, 0.53, 0.27, \n 0.21, 0.205, 0.022, 0.056])\n self.step = 0\n self.frameskip = 25\n self.fulltau = np.zeros(26)\n self.np_policy = policy\n self.target_sim_cache = []\n self.target_hw_cache = []\n\n def compute(self):\n if self.step % self.frameskip == 0:\n o = np.concatenate([self.world.skeletons[-1].q[6:], self.\n world.skeletons[-1].dq[6:]])\n self.target = self.np_policy.act(o, self.world.time())\n 
self.target_hw_cache.append(toRobot(self.target))\n self.target_sim_cache.append(RADIAN2VAL(self.target))\n np.savetxt('darwin/feedforward_target_simindex.txt', np.\n array(self.target_sim_cache, dtype=np.int))\n np.savetxt('darwin/feedforward_target_hwindex.txt', np.\n array(self.target_hw_cache, dtype=np.int))\n tau = -self.kp * (self.world.skeletons[-1].q[6:] - self.target\n ) - self.kd * self.world.skeletons[-1].dq[6:]\n self.fulltau = np.concatenate([np.zeros(6), tau])\n self.step += 1\n return np.clip(self.fulltau, -3.5, 3.5)\n for i in range(6, dart_world.skeletons[-1].ndofs):\n j = dart_world.skeletons[-1].dof(i)\n j.set_damping_coefficient(0.515)\n dart_world.set_gravity([0, 0, -9.81])\n dart_world.skeletons[1].set_mobile(False)\n dart_world.skeletons[1].q = dart_world.skeletons[1].q + 100\n dart_world.set_collision_detector(0)\n dart_world.skeletons[-1].set_self_collision_check(False)\n dart_world.skeletons[0].bodynodes[0].set_friction_coeff(5.0)\n for bn in dart_world.skeletons[-1].bodynodes:\n bn.set_friction_coeff(5.0)\n pose_squat_val = np.array([2509, 2297, 1714, 1508, 1816, 2376, 2047, \n 2171, 2032, 2039, 2795, 648, 1231, 2040, 2041, 2060, 1281, 3448, \n 2855, 2073])\n pose_stand_val = np.array([1500, 2048, 2048, 2500, 2048, 2048, 2048, \n 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, \n 2048, 2048])\n pose_squat = VAL2RADIAN(pose_squat_val)\n pose_stand = VAL2RADIAN(pose_stand_val)\n interp_sch = [[0.0, pose_squat], [3.0, pose_stand], [4.0, pose_stand]]\n policy = NP_Policy(interp_sch,\n 'data/darwin_standsquat_policy_conseq_obs_warmstart.pkl',\n discrete_action=True, action_bins=np.array([11] * 20),\n delta_angle_scale=0.3)\n controller = Controller(dart_world, policy)\n dart_world.skeletons[-1].set_controller(controller)\n print('create controller OK')\n pydart.gui.viewer.launch(dart_world, default_camera=1)\n",
"step-5": "################################################################################\n# Controller of the Darwin Squat-Stand task using numpy #\n# Note: all joint data used in this file uses the dof indexing with #\n# from the simulation environment, not the hardware. #\n################################################################################\n\nimport joblib\nimport numpy as np\n\nfrom darwin.darwin_utils import *\n\n# Class for a neural network model in numpy\nclass NP_Net:\n def __init__(self, nvec = None):\n self.obrms_mean = None # for observation running mean std\n self.obrms_std = None # for observation running mean std\n self.nn_params = [] # stores the neural net parameters in the form of [[W0, b0], [W1, b1], ... [Wn, bn]]\n self.nvec = nvec # None if continuous action, otherwise discrete action in the form of\n # [numbins, numbins, ... numbins]\n\n def load_from_file(self, fname):\n params = joblib.load(fname)\n\n pol_scope = list(params.keys())[0][0:list(params.keys())[0].find('/')]\n obrms_runningsumsq = params[pol_scope+'/obfilter/runningsumsq:0']\n obrms_count = params[pol_scope+'/obfilter/count:0']\n obrms_runningsum = params[pol_scope+'/obfilter/runningsum:0']\n\n self.obrms_mean = obrms_runningsum / obrms_count\n self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count - (self.obrms_mean**2), 1e-2, 1000000))\n\n for i in range(10): # assume maximum layer size of 10\n if pol_scope+'/pol/fc'+str(i)+'/kernel:0' in params:\n W = params[pol_scope+'/pol/fc'+str(i)+'/kernel:0']\n b = params[pol_scope+'/pol/fc'+str(i)+'/bias:0']\n self.nn_params.append([W, b])\n W_final = params[pol_scope + '/pol/final/kernel:0']\n b_final = params[pol_scope + '/pol/final/bias:0']\n self.nn_params.append([W_final, b_final])\n\n def get_output(self, input, activation = np.tanh):\n assert self.obrms_mean is not None\n\n last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0, 5.0)\n\n for i in range(len(self.nn_params)-1):\n last_out = 
activation(np.dot(self.nn_params[i][0].T, last_out) + self.nn_params[i][1])\n out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]\n\n if self.nvec is None:\n return out\n else:\n # convert for discrete output\n splitted_out = np.split(out, np.cumsum(self.nvec)[0:-1])\n discrete_out = np.array([np.argmax(prob) for prob in splitted_out])\n return discrete_out\n\n# Class for a neural network model with mirror symmetry in numpy\nclass NP_Net_MirrorSym:\n def __init__(self, nvec = None, observation_permutation=None,action_permutation=None):\n self.obrms_mean = None # for observation running mean std\n self.obrms_std = None # for observation running mean std\n self.nn_params = [] # stores the neural net parameters in the form of [[W0, b0], [W1, b1], ... [Wn, bn]]\n self.nvec = nvec # None if continuous action, otherwise discrete action in the form of\n # [numbins, numbins, ... numbins]\n\n obs_perm_mat = np.zeros((len(observation_permutation), len(observation_permutation)), dtype=np.float32)\n self.obs_perm_mat = obs_perm_mat\n for i, perm in enumerate(observation_permutation):\n obs_perm_mat[i][int(np.abs(perm))] = np.sign(perm)\n\n if nvec is None:\n act_perm_mat = np.zeros((len(action_permutation), len(action_permutation)), dtype=np.float32)\n self.act_perm_mat = act_perm_mat\n for i, perm in enumerate(action_permutation):\n self.act_perm_mat[i][int(np.abs(perm))] = np.sign(perm)\n else:\n total_dim = int(np.sum(nvec))\n dim_index = np.concatenate([[0], np.cumsum(nvec)])\n act_perm_mat = np.zeros((total_dim, total_dim), dtype=np.float32)\n self.act_perm_mat = act_perm_mat\n for i, perm in enumerate(action_permutation):\n perm_mat = np.identity(nvec[i])\n if np.sign(perm) < 0:\n perm_mat = np.flipud(perm_mat)\n self.act_perm_mat[dim_index[i]:dim_index[i] + nvec[i],\n dim_index[int(np.abs(perm))]:dim_index[int(np.abs(perm))] + nvec[int(np.abs(perm))]] = perm_mat\n\n def load_from_file(self, fname):\n params = joblib.load(fname)\n\n pol_scope = 
list(params.keys())[0][0:list(params.keys())[0].find('/')]\n obrms_runningsumsq = params[pol_scope+'/obfilter/runningsumsq:0']\n obrms_count = params[pol_scope+'/obfilter/count:0']\n obrms_runningsum = params[pol_scope+'/obfilter/runningsum:0']\n\n self.obrms_mean = obrms_runningsum / obrms_count\n self.obrms_std = np.sqrt(np.clip(obrms_runningsumsq / obrms_count - (self.obrms_mean**2), 1e-2, 1000000))\n\n for i in range(10): # assume maximum layer size of 10\n if pol_scope+'/pol_net/genff'+str(i)+'/w:0' in params:\n W = params[pol_scope+'/pol_net/genff'+str(i)+'/w:0']\n b = params[pol_scope+'/pol_net/genff'+str(i)+'/b:0']\n self.nn_params.append([W, b])\n W_final = params[pol_scope + '/pol_net/genff_out/w:0']\n b_final = params[pol_scope + '/pol_net/genff_out/b:0']\n self.nn_params.append([W_final, b_final])\n\n def get_output(self, input, activation = np.tanh):\n assert self.obrms_mean is not None\n\n last_out = np.clip((input - self.obrms_mean) / self.obrms_std, -5.0, 5.0)\n\n for i in range(len(self.nn_params)-1):\n last_out = activation(np.dot(self.nn_params[i][0].T, last_out) + self.nn_params[i][1])\n out = np.dot(self.nn_params[-1][0].T, last_out) + self.nn_params[-1][1]\n\n mirrorlast_out = np.clip((np.dot(input, self.obs_perm_mat) - self.obrms_mean) / self.obrms_std, -5.0, 5.0)\n for i in range(len(self.nn_params) - 1):\n mirrorlast_out = activation(np.dot(self.nn_params[i][0].T, mirrorlast_out) + self.nn_params[i][1])\n mirrorout = np.dot(np.dot(self.nn_params[-1][0].T, mirrorlast_out) + self.nn_params[-1][1], self.act_perm_mat)\n\n if self.nvec is None:\n return out + mirrorout\n else:\n # convert for discrete output\n splitted_out = np.split(out + mirrorout, np.cumsum(self.nvec)[0:-1])\n discrete_out = np.array([np.argmax(prob) for prob in splitted_out])\n return discrete_out\n\n# Class for a neural network policy in numpy\n# Includes the action filtering and pose interpolation\nclass NP_Policy:\n # interp_sch makes the feed-forward motion\n # 
interp_sch contains the timing and pose id throughout the trajectory\n def __init__(self, interp_sch, param_file, discrete_action, action_bins, delta_angle_scale, action_filter_size,\n obs_perm = None, act_perm = None):\n self.interp_sch = interp_sch\n self.obs_cache = []\n self.action_cache = []\n self.action_filter_size = action_filter_size\n if interp_sch is not None:\n self.net = NP_Net()\n else:\n self.net = NP_Net_MirrorSym(action_bins, obs_perm, act_perm)\n self.net.load_from_file(param_file)\n self.discrete_action = discrete_action\n self.delta_angle_scale = delta_angle_scale\n if discrete_action:\n self.net.nvec = action_bins\n\n # Get the initial state for the robot\n # RETURN: a 20d vector for the robot pose\n def get_initial_state(self):\n if self.interp_sch is not None:\n return self.interp_sch[0][1]\n else:\n return 0.5*(pose_squat + pose_stand)\n\n # Reset the state of the policy\n # This is needed because the action cache essentially forms a memory in the policy\n def reset(self):\n self.action_cache = []\n\n # Return the action to be taken by the robot given the observation and current time\n # INPUT: o, a 40d vector containing the pose and velocity of the robot\n # t, current time in seconds, used to get the reference pose\n # RETURN: a 20d vector containing the target angle (in radians) for the robot joints\n def act(self, o, t):\n # get network output action\n new_action = self.net.get_output(o)\n\n if self.discrete_action:\n new_action = new_action * 1.0 / np.floor(self.net.nvec/2.0) - 1.0\n\n self.action_cache.append(new_action)\n if len(self.action_cache) > self.action_filter_size:\n self.action_cache.pop(0)\n filtered_action = np.mean(self.action_cache, axis=0)\n\n # get feedforward action\n clamped_control = np.clip(filtered_action, -1, 1)\n\n if self.interp_sch is not None:\n self.ref_target = self.interp_sch[0][1]\n for i in range(len(self.interp_sch) - 1):\n if t >= self.interp_sch[i][0] and t < self.interp_sch[i + 1][0]:\n ratio = (t - 
self.interp_sch[i][0]) / (self.interp_sch[i + 1][0] - self.interp_sch[i][0])\n self.ref_target = ratio * self.interp_sch[i + 1][1] + (1 - ratio) * self.interp_sch[i][1]\n if t > self.interp_sch[-1][0]:\n self.ref_target = self.interp_sch[-1][1]\n\n # combine policy output and keyframe interpolation to get the target joint positions\n target_pose = self.ref_target + clamped_control * self.delta_angle_scale\n else:\n target_pose = (clamped_control + 1.0) / 2.0 * (SIM_CONTROL_UP_BOUND_RAD - SIM_CONTROL_LOW_BOUND_RAD) + SIM_CONTROL_LOW_BOUND_RAD\n target_pose = np.clip(target_pose, SIM_JOINT_LOW_BOUND_RAD, SIM_JOINT_UP_BOUND_RAD)\n\n return target_pose\n\n\n\n\ndef toRobot(positions):\n # reorder joints\n index = [3,0,4,1,5,2,14,8,15,9,16,10,17,11,18,12,19,13,6,7]\n # convert from radians to int\n robotState = np.zeros(len(positions))\n for i in range(len(positions)):\n robotState[i] = int(positions[i]*180*(1/(np.pi*0.088))) + 2048\n\n return robotState[index].astype(int)\n\n\n#######################################\n# test the file in pydart2 simulation #\n#######################################\nif __name__ == \"__main__\":\n import pydart2 as pydart\n import gym\n\n env = gym.make('DartDarwinSquat-v1') # use the dart_world in the gym environment to avoid copying the data\n env.reset()\n dart_world = env.env.dart_world\n\n class Controller(object):\n def __init__(self, world, policy):\n self.world = world\n self.target = None\n self.kp = np.array([2.1, 1.79, 4.93,\n 2.0, 2.02, 1.98,\n 2.2, 2.06,\n 148, 152, 150, 136, 153, 102,\n 151, 151.4, 150.45, 151.36, 154, 105.2])\n self.kd = np.array([0.21, 0.23, 0.22,\n 0.25, 0.21, 0.26,\n 0.28, 0.213\n , 0.192, 0.198, 0.22, 0.199, 0.02, 0.01,\n 0.53, 0.27, 0.21, 0.205, 0.022, 0.056])\n self.step = 0\n self.frameskip = 25\n self.fulltau = np.zeros(26)\n self.np_policy = policy\n self.target_sim_cache = []\n self.target_hw_cache = []\n\n\n def compute(self):\n if self.step % self.frameskip == 0:\n o = 
np.concatenate([self.world.skeletons[-1].q[6:], self.world.skeletons[-1].dq[6:]])\n self.target = self.np_policy.act(o, self.world.time())\n self.target_hw_cache.append(toRobot(self.target))\n self.target_sim_cache.append(RADIAN2VAL(self.target))\n np.savetxt('darwin/feedforward_target_simindex.txt', np.array(self.target_sim_cache, dtype=np.int))\n np.savetxt('darwin/feedforward_target_hwindex.txt', np.array(self.target_hw_cache, dtype=np.int))\n tau = -self.kp * (self.world.skeletons[-1].q[6:] - self.target) - self.kd * self.world.skeletons[-1].dq[6:]\n self.fulltau = np.concatenate([np.zeros(6), tau])\n self.step += 1\n return np.clip(self.fulltau, -3.5, 3.5) # torque limit of 3.5 Nm\n\n\n # Set joint damping\n for i in range(6, dart_world.skeletons[-1].ndofs):\n j = dart_world.skeletons[-1].dof(i)\n j.set_damping_coefficient(0.515)\n\n dart_world.set_gravity([0, 0, -9.81])\n dart_world.skeletons[1].set_mobile(False)\n dart_world.skeletons[1].q = dart_world.skeletons[1].q + 100\n dart_world.set_collision_detector(0)\n dart_world.skeletons[-1].set_self_collision_check(False)\n\n dart_world.skeletons[0].bodynodes[0].set_friction_coeff(5.0)\n for bn in dart_world.skeletons[-1].bodynodes:\n bn.set_friction_coeff(5.0)\n\n ############################################################################\n #### Setup the policy from file ####\n #### refer to this part for construction of policy to be run on hardware ###\n ############################################################################\n pose_squat_val = np.array([2509, 2297, 1714, 1508, 1816, 2376,\n 2047, 2171,\n 2032, 2039, 2795, 648, 1231, 2040, 2041, 2060, 1281, 3448, 2855, 2073])\n pose_stand_val = np.array([1500, 2048, 2048, 2500, 2048, 2048,\n 2048, 2048,\n 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048, 2048])\n\n pose_squat = VAL2RADIAN(pose_squat_val)\n pose_stand = VAL2RADIAN(pose_stand_val)\n\n # keyframe scheduling for squat stand task\n interp_sch = [[0.0, pose_squat],\n [3.0, 
pose_stand],\n [4.0, pose_stand],\n ]\n policy = NP_Policy(interp_sch, 'data/darwin_standsquat_policy_conseq_obs_warmstart.pkl', discrete_action=True,\n action_bins=np.array([11] * 20), delta_angle_scale=0.3)\n ############################################################################\n # End of setup for policy\n # policy should be used for executing on other environments\n ############################################################################\n\n # Initialize the controller\n controller = Controller(dart_world, policy)\n dart_world.skeletons[-1].set_controller(controller)\n print('create controller OK')\n\n pydart.gui.viewer.launch(dart_world,\n default_camera=1) # Use Z-up camera\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
8,
12,
14,
16,
17
]
}
|
[
8,
12,
14,
16,
17
] |
import os
from typing import List, Optional, Sequence
import boto3
from google.cloud import storage
from ..globals import GLOBALS, LOGGER
def set_gcs_credentials():
if os.path.exists(GLOBALS.google_application_credentials):
return
secrets_client = boto3.client(
"secretsmanager",
region_name=GLOBALS.aws_region,
endpoint_url=GLOBALS.aws_endpoint_uri,
)
response = secrets_client.get_secret_value(SecretId=GLOBALS.gcs_key_secret_arn)
os.makedirs(
os.path.dirname(GLOBALS.google_application_credentials),
exist_ok=True,
)
with open(GLOBALS.google_application_credentials, "w") as f:
f.write(response["SecretString"])
def get_gs_files(
bucket: str,
prefix: str,
limit: Optional[int] = None,
exit_after_max: Optional[int] = None,
extensions: Sequence[str] = tuple(),
) -> List[str]:
"""Get all matching files in GCS.
Adapted from data API.
"""
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(
GLOBALS.google_application_credentials
)
matches: List[str] = list()
num_matches: int = 0
blobs = list(storage_client.list_blobs(bucket, prefix=prefix, max_results=limit))
LOGGER.info(f"Found files under gs://{bucket}/{prefix}: {blobs}")
for blob in blobs:
if not extensions or any(blob.name.endswith(ext) for ext in extensions):
matches.append(blob.name)
num_matches += 1
if exit_after_max and num_matches >= exit_after_max:
break
return matches
def get_gs_subfolders(
bucket: str,
prefix: str,
) -> List[str]:
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(
GLOBALS.google_application_credentials
)
delimiter = "/"
if not prefix.endswith(delimiter):
prefix = prefix + delimiter
blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=delimiter)
try:
_ = next(blobs)
except StopIteration:
pass
found_prefixes = [
found_prefix.lstrip(prefix).strip("/") for found_prefix in blobs.prefixes
]
return found_prefixes
def get_gs_file_as_text(
bucket: str,
key: str,
) -> str:
"""
Get contents of a file as a string
"""
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(
GLOBALS.google_application_credentials
)
blob = storage_client.get_bucket(bucket).get_blob(key)
return blob.download_as_text(encoding="utf-8")
|
normal
|
{
"blob_id": "a5eeafef694db04770833a4063358e8f32f467b0",
"index": 8310,
"step-1": "<mask token>\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.\n aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)\n response = secrets_client.get_secret_value(SecretId=GLOBALS.\n gcs_key_secret_arn)\n os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True)\n with open(GLOBALS.google_application_credentials, 'w') as f:\n f.write(response['SecretString'])\n\n\ndef get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,\n exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()\n ) ->List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n matches: List[str] = list()\n num_matches: int = 0\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix,\n max_results=limit))\n LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions\n ):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n return matches\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.\n aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)\n response = secrets_client.get_secret_value(SecretId=GLOBALS.\n gcs_key_secret_arn)\n os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True)\n with open(GLOBALS.google_application_credentials, 'w') as f:\n f.write(response['SecretString'])\n\n\ndef get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,\n exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()\n ) ->List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n matches: List[str] = list()\n num_matches: int = 0\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix,\n max_results=limit))\n LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions\n ):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n return matches\n\n\ndef get_gs_subfolders(bucket: str, prefix: str) ->List[str]:\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n delimiter = '/'\n if not prefix.endswith(delimiter):\n prefix = prefix + delimiter\n blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=\n delimiter)\n try:\n _ = next(blobs)\n except StopIteration:\n pass\n found_prefixes = [found_prefix.lstrip(prefix).strip('/') for\n found_prefix in blobs.prefixes]\n return found_prefixes\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.\n aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)\n response = secrets_client.get_secret_value(SecretId=GLOBALS.\n gcs_key_secret_arn)\n os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True)\n with open(GLOBALS.google_application_credentials, 'w') as f:\n f.write(response['SecretString'])\n\n\ndef get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,\n exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()\n ) ->List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n matches: List[str] = list()\n num_matches: int = 0\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix,\n max_results=limit))\n LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions\n ):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n return matches\n\n\ndef get_gs_subfolders(bucket: str, prefix: str) ->List[str]:\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n delimiter = '/'\n if not prefix.endswith(delimiter):\n prefix = prefix + delimiter\n blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=\n delimiter)\n try:\n _ = next(blobs)\n except StopIteration:\n pass\n found_prefixes = [found_prefix.lstrip(prefix).strip('/') for\n found_prefix in blobs.prefixes]\n return found_prefixes\n\n\ndef get_gs_file_as_text(bucket: str, key: str) ->str:\n \"\"\"\n Get contents of a file as a string\n \"\"\"\n set_gcs_credentials()\n 
storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n blob = storage_client.get_bucket(bucket).get_blob(key)\n return blob.download_as_text(encoding='utf-8')\n",
"step-4": "import os\nfrom typing import List, Optional, Sequence\nimport boto3\nfrom google.cloud import storage\nfrom ..globals import GLOBALS, LOGGER\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.\n aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)\n response = secrets_client.get_secret_value(SecretId=GLOBALS.\n gcs_key_secret_arn)\n os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True)\n with open(GLOBALS.google_application_credentials, 'w') as f:\n f.write(response['SecretString'])\n\n\ndef get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,\n exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()\n ) ->List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n matches: List[str] = list()\n num_matches: int = 0\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix,\n max_results=limit))\n LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions\n ):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n return matches\n\n\ndef get_gs_subfolders(bucket: str, prefix: str) ->List[str]:\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n delimiter = '/'\n if not prefix.endswith(delimiter):\n prefix = prefix + delimiter\n blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=\n delimiter)\n try:\n _ = next(blobs)\n except StopIteration:\n pass\n found_prefixes = [found_prefix.lstrip(prefix).strip('/') for\n found_prefix in blobs.prefixes]\n return found_prefixes\n\n\ndef 
get_gs_file_as_text(bucket: str, key: str) ->str:\n \"\"\"\n Get contents of a file as a string\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n blob = storage_client.get_bucket(bucket).get_blob(key)\n return blob.download_as_text(encoding='utf-8')\n",
"step-5": "import os\nfrom typing import List, Optional, Sequence\n\nimport boto3\nfrom google.cloud import storage\n\nfrom ..globals import GLOBALS, LOGGER\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n\n secrets_client = boto3.client(\n \"secretsmanager\",\n region_name=GLOBALS.aws_region,\n endpoint_url=GLOBALS.aws_endpoint_uri,\n )\n\n response = secrets_client.get_secret_value(SecretId=GLOBALS.gcs_key_secret_arn)\n\n os.makedirs(\n os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True,\n )\n\n with open(GLOBALS.google_application_credentials, \"w\") as f:\n f.write(response[\"SecretString\"])\n\n\ndef get_gs_files(\n bucket: str,\n prefix: str,\n limit: Optional[int] = None,\n exit_after_max: Optional[int] = None,\n extensions: Sequence[str] = tuple(),\n) -> List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n\n storage_client = storage.Client.from_service_account_json(\n GLOBALS.google_application_credentials\n )\n\n matches: List[str] = list()\n num_matches: int = 0\n\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix, max_results=limit))\n\n LOGGER.info(f\"Found files under gs://{bucket}/{prefix}: {blobs}\")\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n\n return matches\n\n\ndef get_gs_subfolders(\n bucket: str,\n prefix: str,\n) -> List[str]:\n set_gcs_credentials()\n\n storage_client = storage.Client.from_service_account_json(\n GLOBALS.google_application_credentials\n )\n\n delimiter = \"/\"\n if not prefix.endswith(delimiter):\n prefix = prefix + delimiter\n\n blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=delimiter)\n\n try:\n _ = next(blobs)\n except StopIteration:\n pass\n\n found_prefixes = [\n 
found_prefix.lstrip(prefix).strip(\"/\") for found_prefix in blobs.prefixes\n ]\n\n return found_prefixes\n\n\ndef get_gs_file_as_text(\n bucket: str,\n key: str,\n) -> str:\n \"\"\"\n Get contents of a file as a string\n \"\"\"\n set_gcs_credentials()\n\n storage_client = storage.Client.from_service_account_json(\n GLOBALS.google_application_credentials\n )\n\n blob = storage_client.get_bucket(bucket).get_blob(key)\n return blob.download_as_text(encoding=\"utf-8\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class DGMSOLVER:
def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None,
vacuum=False, k=None, phi=None, psi=None):
"""
Inputs:
G - Number of energy groups
fname - Name of the cross section file
fm - Fine mesh
cm - Coarse mesh
mm - Material map
nPin - Number of pincells
norm - norm of the flux to keep constant (match phi shape)
mapping - structure class that holds fine -> coarse mapping
"""
self.G = G
self.fname = fname
self.fm = fm
self.cm = cm
self.mm = mm
self.npin = nPin
self.norm = norm
self.computenorm = self.norm is None
self.vacuum = vacuum
self.mapping = mapping
self.setOptions()
self.solve(k, phi, psi)
self.homogenize_space()
if self.mapping is not None:
self.homogenize_energy()
<|reserved_special_token_0|>
def solve(self, k, phi, psi):
"""
Solve the problem using Unotran
"""
pydgm.solver.initialize_solver()
if k is not None:
pydgm.state.keff = k
if phi is not None:
pydgm.state.phi = phi
if psi is not None:
pydgm.state.psi = psi
pydgm.solver.solve()
self.extractInfo()
self.iter_k = np.copy(pydgm.state.keff)
self.iter_phi = np.copy(pydgm.state.phi)
self.iter_psi = np.copy(pydgm.state.psi)
pydgm.solver.finalize_solver()
pydgm.control.finalize_control()
def extractInfo(self):
"""
Copy information from Unotran before the solver is deallocated
"""
self.phi = np.copy(pydgm.state.mg_phi[0])
self.dx = np.copy(pydgm.mesh.dx)
self.mat_map = np.copy(pydgm.state.mg_mmap)
self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for
c in range(len(self.mat_map))]).T
self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c
] - 1] for c in range(len(self.mat_map))]).T
self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] -
1] for c in range(len(self.mat_map))]).T
self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for
c in range(len(self.mat_map))]).T
def homogenize_space(self):
"""
Homogenize the cross sections over the spatial region
"""
def homo_space(array):
"""Convenience function to do the integration"""
return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2
) / V
shape = self.phi.shape
nCellPerPin = shape[1] // self.npin
V = np.sum(self.dx.reshape(self.npin, -1), axis=1)
phi_dx = self.phi[:, :] * self.dx[:]
self.phi_homo = homo_space(phi_dx)
if self.computenorm:
self.norm = np.sum(self.phi_homo, axis=-1)
else:
print('compute norm')
norm = self.norm / np.sum(self.phi_homo, axis=-1)
self.phi_homo *= norm[:, np.newaxis]
phi_dx *= norm[:, np.newaxis]
self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo
self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo
self.chi_homo = homo_space(self.chi * self.dx)
self.sig_s_homo = np.zeros((self.G, self.G, self.npin))
for gp in range(self.G):
self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx
) / self.phi_homo
def homogenize_energy(self):
"""
Homogenize the cross sections over the energy range
"""
def homo_energy(array1, array2=None):
"""
convinence function to do the integration
return rac{\\sum_i array1[i] * array2[i]}{\\sum_i array2[i]} for each coarse group
"""
if array2 is not None:
y = np.zeros((nCG, len(array1[0])))
z = np.zeros((nCG, len(array1[0])))
for g, cg in enumerate(grouping):
z[cg - 1] += array1[g] * array2[g]
y[cg - 1] += array2[g]
return z / y
else:
z = np.zeros((nCG, len(array1[0])))
for g, cg in enumerate(grouping):
z[cg - 1] += array1[g]
return z
nCG = self.mapping.nCG
nFG = self.mapping.nFG
grouping = np.array(self.mapping.grouping)
dE_coarse = np.array(self.mapping.dE_coarse)
dE_fine = np.array(self.mapping.dE_fine)
dE_coarse /= dE_coarse
dE_fine /= dE_fine
phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])
if self.computenorm:
norm = np.zeros(nCG)
for g, cg in enumerate(grouping):
norm[cg - 1] += self.norm[g]
self.norm = norm
"""
print(self.mapping.fine_bounds)
import matplotlib.pyplot as plt
def barchart(x, y):
X = np.zeros(2 * len(y))
Y = np.zeros(2 * len(y))
for i in range(0, len(y)):
X[2 * i] = x[i]
X[2 * i + 1] = x[i + 1]
Y[2 * i] = y[i]
Y[2 * i + 1] = y[i]
return X, Y
plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')
"""
self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)
self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)
self.chi_homo = homo_energy(self.chi_homo)
sig_s_homo = np.zeros((nCG, nCG, self.npin))
for gp, g in enumerate(grouping):
sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo
)
self.sig_s_homo = sig_s_homo
self.phi_homo = phi_homo
"""
plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')
plt.legend(loc=0)
plt.xlabel('Energy [MeV]')
plt.ylabel('$\\Sigma_t$ [cm$^{-1}$]')
plt.savefig('test.pdf', transparent=True)
"""
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class XS:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class DGMSOLVER:
def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None,
vacuum=False, k=None, phi=None, psi=None):
"""
Inputs:
G - Number of energy groups
fname - Name of the cross section file
fm - Fine mesh
cm - Coarse mesh
mm - Material map
nPin - Number of pincells
norm - norm of the flux to keep constant (match phi shape)
mapping - structure class that holds fine -> coarse mapping
"""
self.G = G
self.fname = fname
self.fm = fm
self.cm = cm
self.mm = mm
self.npin = nPin
self.norm = norm
self.computenorm = self.norm is None
self.vacuum = vacuum
self.mapping = mapping
self.setOptions()
self.solve(k, phi, psi)
self.homogenize_space()
if self.mapping is not None:
self.homogenize_energy()
def setOptions(self):
"""
Set the options for the Unotran solve
"""
pydgm.control.spatial_dimension = 1
pydgm.control.fine_mesh_x = self.fm
pydgm.control.coarse_mesh_x = self.cm
pydgm.control.material_map = self.mm
pydgm.control.xs_name = self.fname.ljust(256)
pydgm.control.angle_order = 8
pydgm.control.angle_option = pydgm.angle.gl
pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0
pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0
pydgm.control.allow_fission = True
pydgm.control.eigen_print = 0
pydgm.control.outer_print = 0
pydgm.control.eigen_tolerance = 1e-14
pydgm.control.outer_tolerance = 1e-12
pydgm.control.max_eigen_iters = 10000
pydgm.control.max_outer_iters = 1
pydgm.control.store_psi = True
pydgm.control.solver_type = 'eigen'.ljust(256)
pydgm.control.source_value = 0.0
pydgm.control.equation_type = 'DD'
pydgm.control.scatter_leg_order = 0
pydgm.control.ignore_warnings = True
def solve(self, k, phi, psi):
"""
Solve the problem using Unotran
"""
pydgm.solver.initialize_solver()
if k is not None:
pydgm.state.keff = k
if phi is not None:
pydgm.state.phi = phi
if psi is not None:
pydgm.state.psi = psi
pydgm.solver.solve()
self.extractInfo()
self.iter_k = np.copy(pydgm.state.keff)
self.iter_phi = np.copy(pydgm.state.phi)
self.iter_psi = np.copy(pydgm.state.psi)
pydgm.solver.finalize_solver()
pydgm.control.finalize_control()
def extractInfo(self):
"""
Copy information from Unotran before the solver is deallocated
"""
self.phi = np.copy(pydgm.state.mg_phi[0])
self.dx = np.copy(pydgm.mesh.dx)
self.mat_map = np.copy(pydgm.state.mg_mmap)
self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for
c in range(len(self.mat_map))]).T
self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c
] - 1] for c in range(len(self.mat_map))]).T
self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] -
1] for c in range(len(self.mat_map))]).T
self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for
c in range(len(self.mat_map))]).T
def homogenize_space(self):
"""
Homogenize the cross sections over the spatial region
"""
def homo_space(array):
"""Convenience function to do the integration"""
return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2
) / V
shape = self.phi.shape
nCellPerPin = shape[1] // self.npin
V = np.sum(self.dx.reshape(self.npin, -1), axis=1)
phi_dx = self.phi[:, :] * self.dx[:]
self.phi_homo = homo_space(phi_dx)
if self.computenorm:
self.norm = np.sum(self.phi_homo, axis=-1)
else:
print('compute norm')
norm = self.norm / np.sum(self.phi_homo, axis=-1)
self.phi_homo *= norm[:, np.newaxis]
phi_dx *= norm[:, np.newaxis]
self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo
self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo
self.chi_homo = homo_space(self.chi * self.dx)
self.sig_s_homo = np.zeros((self.G, self.G, self.npin))
for gp in range(self.G):
self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx
) / self.phi_homo
def homogenize_energy(self):
"""
Homogenize the cross sections over the energy range
"""
def homo_energy(array1, array2=None):
"""
convinence function to do the integration
return rac{\\sum_i array1[i] * array2[i]}{\\sum_i array2[i]} for each coarse group
"""
if array2 is not None:
y = np.zeros((nCG, len(array1[0])))
z = np.zeros((nCG, len(array1[0])))
for g, cg in enumerate(grouping):
z[cg - 1] += array1[g] * array2[g]
y[cg - 1] += array2[g]
return z / y
else:
z = np.zeros((nCG, len(array1[0])))
for g, cg in enumerate(grouping):
z[cg - 1] += array1[g]
return z
nCG = self.mapping.nCG
nFG = self.mapping.nFG
grouping = np.array(self.mapping.grouping)
dE_coarse = np.array(self.mapping.dE_coarse)
dE_fine = np.array(self.mapping.dE_fine)
dE_coarse /= dE_coarse
dE_fine /= dE_fine
phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])
if self.computenorm:
norm = np.zeros(nCG)
for g, cg in enumerate(grouping):
norm[cg - 1] += self.norm[g]
self.norm = norm
"""
print(self.mapping.fine_bounds)
import matplotlib.pyplot as plt
def barchart(x, y):
X = np.zeros(2 * len(y))
Y = np.zeros(2 * len(y))
for i in range(0, len(y)):
X[2 * i] = x[i]
X[2 * i + 1] = x[i + 1]
Y[2 * i] = y[i]
Y[2 * i + 1] = y[i]
return X, Y
plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')
"""
self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)
self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)
self.chi_homo = homo_energy(self.chi_homo)
sig_s_homo = np.zeros((nCG, nCG, self.npin))
for gp, g in enumerate(grouping):
sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo
)
self.sig_s_homo = sig_s_homo
self.phi_homo = phi_homo
"""
plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')
plt.legend(loc=0)
plt.xlabel('Energy [MeV]')
plt.ylabel('$\\Sigma_t$ [cm$^{-1}$]')
plt.savefig('test.pdf', transparent=True)
"""
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class XS:
<|reserved_special_token_0|>
def write_homogenized_XS(self, fname, mu=None):
if mu is not None:
assert mu.shape == self.sig_t.shape
self.mu = mu
G, npin = self.sig_t.shape
sig_t = self.sig_t * self.mu
vsig_f = self.sig_f * self.mu
sig_s = self.sig_s * self.mu
s = '{} {} 0\n'.format(npin, G)
s += '{}\n'.format(' '.join([str(g) for g in range(G + 1)]))
s += '{}\n'.format(' '.join([str(g) for g in range(G)]))
for mat in range(npin):
s += 'pin {}\n'.format(mat + 1)
s += '1 1 1.0 0.0 0.602214179\n'
for g in range(G):
s += '{:<12.9f} {:<12.9f} {:<12.9f} {:<12.9f}\n'.format(sig_t
[g, mat], vsig_f[g, mat], vsig_f[g, mat], self.chi[g, mat])
for g in range(G):
s += '{}\n'.format(' '.join(['{:<12.9f}'.format(s) for s in
sig_s[:, g, mat]]))
with open(fname, 'w') as f:
f.write(s[:-1])
<|reserved_special_token_0|>
class DGMSOLVER:
def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None,
vacuum=False, k=None, phi=None, psi=None):
"""
Inputs:
G - Number of energy groups
fname - Name of the cross section file
fm - Fine mesh
cm - Coarse mesh
mm - Material map
nPin - Number of pincells
norm - norm of the flux to keep constant (match phi shape)
mapping - structure class that holds fine -> coarse mapping
"""
self.G = G
self.fname = fname
self.fm = fm
self.cm = cm
self.mm = mm
self.npin = nPin
self.norm = norm
self.computenorm = self.norm is None
self.vacuum = vacuum
self.mapping = mapping
self.setOptions()
self.solve(k, phi, psi)
self.homogenize_space()
if self.mapping is not None:
self.homogenize_energy()
def setOptions(self):
"""
Set the options for the Unotran solve
"""
pydgm.control.spatial_dimension = 1
pydgm.control.fine_mesh_x = self.fm
pydgm.control.coarse_mesh_x = self.cm
pydgm.control.material_map = self.mm
pydgm.control.xs_name = self.fname.ljust(256)
pydgm.control.angle_order = 8
pydgm.control.angle_option = pydgm.angle.gl
pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0
pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0
pydgm.control.allow_fission = True
pydgm.control.eigen_print = 0
pydgm.control.outer_print = 0
pydgm.control.eigen_tolerance = 1e-14
pydgm.control.outer_tolerance = 1e-12
pydgm.control.max_eigen_iters = 10000
pydgm.control.max_outer_iters = 1
pydgm.control.store_psi = True
pydgm.control.solver_type = 'eigen'.ljust(256)
pydgm.control.source_value = 0.0
pydgm.control.equation_type = 'DD'
pydgm.control.scatter_leg_order = 0
pydgm.control.ignore_warnings = True
def solve(self, k, phi, psi):
"""
Solve the problem using Unotran
"""
pydgm.solver.initialize_solver()
if k is not None:
pydgm.state.keff = k
if phi is not None:
pydgm.state.phi = phi
if psi is not None:
pydgm.state.psi = psi
pydgm.solver.solve()
self.extractInfo()
self.iter_k = np.copy(pydgm.state.keff)
self.iter_phi = np.copy(pydgm.state.phi)
self.iter_psi = np.copy(pydgm.state.psi)
pydgm.solver.finalize_solver()
pydgm.control.finalize_control()
def extractInfo(self):
"""
Copy information from Unotran before the solver is deallocated
"""
self.phi = np.copy(pydgm.state.mg_phi[0])
self.dx = np.copy(pydgm.mesh.dx)
self.mat_map = np.copy(pydgm.state.mg_mmap)
self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for
c in range(len(self.mat_map))]).T
self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c
] - 1] for c in range(len(self.mat_map))]).T
self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] -
1] for c in range(len(self.mat_map))]).T
self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for
c in range(len(self.mat_map))]).T
def homogenize_space(self):
"""
Homogenize the cross sections over the spatial region
"""
def homo_space(array):
"""Convenience function to do the integration"""
return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2
) / V
shape = self.phi.shape
nCellPerPin = shape[1] // self.npin
V = np.sum(self.dx.reshape(self.npin, -1), axis=1)
phi_dx = self.phi[:, :] * self.dx[:]
self.phi_homo = homo_space(phi_dx)
if self.computenorm:
self.norm = np.sum(self.phi_homo, axis=-1)
else:
print('compute norm')
norm = self.norm / np.sum(self.phi_homo, axis=-1)
self.phi_homo *= norm[:, np.newaxis]
phi_dx *= norm[:, np.newaxis]
self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo
self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo
self.chi_homo = homo_space(self.chi * self.dx)
self.sig_s_homo = np.zeros((self.G, self.G, self.npin))
for gp in range(self.G):
self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx
) / self.phi_homo
def homogenize_energy(self):
"""
Homogenize the cross sections over the energy range
"""
def homo_energy(array1, array2=None):
"""
convinence function to do the integration
return rac{\\sum_i array1[i] * array2[i]}{\\sum_i array2[i]} for each coarse group
"""
if array2 is not None:
y = np.zeros((nCG, len(array1[0])))
z = np.zeros((nCG, len(array1[0])))
for g, cg in enumerate(grouping):
z[cg - 1] += array1[g] * array2[g]
y[cg - 1] += array2[g]
return z / y
else:
z = np.zeros((nCG, len(array1[0])))
for g, cg in enumerate(grouping):
z[cg - 1] += array1[g]
return z
nCG = self.mapping.nCG
nFG = self.mapping.nFG
grouping = np.array(self.mapping.grouping)
dE_coarse = np.array(self.mapping.dE_coarse)
dE_fine = np.array(self.mapping.dE_fine)
dE_coarse /= dE_coarse
dE_fine /= dE_fine
phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])
if self.computenorm:
norm = np.zeros(nCG)
for g, cg in enumerate(grouping):
norm[cg - 1] += self.norm[g]
self.norm = norm
"""
print(self.mapping.fine_bounds)
import matplotlib.pyplot as plt
def barchart(x, y):
X = np.zeros(2 * len(y))
Y = np.zeros(2 * len(y))
for i in range(0, len(y)):
X[2 * i] = x[i]
X[2 * i + 1] = x[i + 1]
Y[2 * i] = y[i]
Y[2 * i + 1] = y[i]
return X, Y
plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')
"""
self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)
self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)
self.chi_homo = homo_energy(self.chi_homo)
sig_s_homo = np.zeros((nCG, nCG, self.npin))
for gp, g in enumerate(grouping):
sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo
)
self.sig_s_homo = sig_s_homo
self.phi_homo = phi_homo
"""
plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')
plt.legend(loc=0)
plt.xlabel('Energy [MeV]')
plt.ylabel('$\\Sigma_t$ [cm$^{-1}$]')
plt.savefig('test.pdf', transparent=True)
"""
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class XS:
def __init__(self, sig_t, sig_f, chi, sig_s, mu=None):
self.sig_t = sig_t
self.sig_f = sig_f
self.chi = chi
self.sig_s = sig_s
self.mu = mu if mu is None else np.ones(self.sig_t.shape)
def write_homogenized_XS(self, fname, mu=None):
if mu is not None:
assert mu.shape == self.sig_t.shape
self.mu = mu
G, npin = self.sig_t.shape
sig_t = self.sig_t * self.mu
vsig_f = self.sig_f * self.mu
sig_s = self.sig_s * self.mu
s = '{} {} 0\n'.format(npin, G)
s += '{}\n'.format(' '.join([str(g) for g in range(G + 1)]))
s += '{}\n'.format(' '.join([str(g) for g in range(G)]))
for mat in range(npin):
s += 'pin {}\n'.format(mat + 1)
s += '1 1 1.0 0.0 0.602214179\n'
for g in range(G):
s += '{:<12.9f} {:<12.9f} {:<12.9f} {:<12.9f}\n'.format(sig_t
[g, mat], vsig_f[g, mat], vsig_f[g, mat], self.chi[g, mat])
for g in range(G):
s += '{}\n'.format(' '.join(['{:<12.9f}'.format(s) for s in
sig_s[:, g, mat]]))
with open(fname, 'w') as f:
f.write(s[:-1])
def __add__(self, newXS):
sig_t = np.concatenate([self.sig_t, newXS.sig_t], axis=-1)
sig_f = np.concatenate([self.sig_f, newXS.sig_f], axis=-1)
sig_s = np.concatenate([self.sig_s, newXS.sig_s], axis=-1)
chi = np.concatenate([self.chi, newXS.chi], axis=-1)
mu = np.concatenate([self.mu, newXS.mu], axis=-1)
return XS(sig_t, sig_f, chi, sig_s, mu)
class DGMSOLVER:
def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None,
vacuum=False, k=None, phi=None, psi=None):
"""
Inputs:
G - Number of energy groups
fname - Name of the cross section file
fm - Fine mesh
cm - Coarse mesh
mm - Material map
nPin - Number of pincells
norm - norm of the flux to keep constant (match phi shape)
mapping - structure class that holds fine -> coarse mapping
"""
self.G = G
self.fname = fname
self.fm = fm
self.cm = cm
self.mm = mm
self.npin = nPin
self.norm = norm
self.computenorm = self.norm is None
self.vacuum = vacuum
self.mapping = mapping
self.setOptions()
self.solve(k, phi, psi)
self.homogenize_space()
if self.mapping is not None:
self.homogenize_energy()
def setOptions(self):
"""
Set the options for the Unotran solve
"""
pydgm.control.spatial_dimension = 1
pydgm.control.fine_mesh_x = self.fm
pydgm.control.coarse_mesh_x = self.cm
pydgm.control.material_map = self.mm
pydgm.control.xs_name = self.fname.ljust(256)
pydgm.control.angle_order = 8
pydgm.control.angle_option = pydgm.angle.gl
pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0
pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0
pydgm.control.allow_fission = True
pydgm.control.eigen_print = 0
pydgm.control.outer_print = 0
pydgm.control.eigen_tolerance = 1e-14
pydgm.control.outer_tolerance = 1e-12
pydgm.control.max_eigen_iters = 10000
pydgm.control.max_outer_iters = 1
pydgm.control.store_psi = True
pydgm.control.solver_type = 'eigen'.ljust(256)
pydgm.control.source_value = 0.0
pydgm.control.equation_type = 'DD'
pydgm.control.scatter_leg_order = 0
pydgm.control.ignore_warnings = True
def solve(self, k, phi, psi):
"""
Solve the problem using Unotran
"""
pydgm.solver.initialize_solver()
if k is not None:
pydgm.state.keff = k
if phi is not None:
pydgm.state.phi = phi
if psi is not None:
pydgm.state.psi = psi
pydgm.solver.solve()
self.extractInfo()
self.iter_k = np.copy(pydgm.state.keff)
self.iter_phi = np.copy(pydgm.state.phi)
self.iter_psi = np.copy(pydgm.state.psi)
pydgm.solver.finalize_solver()
pydgm.control.finalize_control()
def extractInfo(self):
"""
Copy information from Unotran before the solver is deallocated
"""
self.phi = np.copy(pydgm.state.mg_phi[0])
self.dx = np.copy(pydgm.mesh.dx)
self.mat_map = np.copy(pydgm.state.mg_mmap)
self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for
c in range(len(self.mat_map))]).T
self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c
] - 1] for c in range(len(self.mat_map))]).T
self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] -
1] for c in range(len(self.mat_map))]).T
self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for
c in range(len(self.mat_map))]).T
def homogenize_space(self):
"""
Homogenize the cross sections over the spatial region
"""
def homo_space(array):
"""Convenience function to do the integration"""
return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2
) / V
shape = self.phi.shape
nCellPerPin = shape[1] // self.npin
V = np.sum(self.dx.reshape(self.npin, -1), axis=1)
phi_dx = self.phi[:, :] * self.dx[:]
self.phi_homo = homo_space(phi_dx)
if self.computenorm:
self.norm = np.sum(self.phi_homo, axis=-1)
else:
print('compute norm')
norm = self.norm / np.sum(self.phi_homo, axis=-1)
self.phi_homo *= norm[:, np.newaxis]
phi_dx *= norm[:, np.newaxis]
self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo
self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo
self.chi_homo = homo_space(self.chi * self.dx)
self.sig_s_homo = np.zeros((self.G, self.G, self.npin))
for gp in range(self.G):
self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx
) / self.phi_homo
def homogenize_energy(self):
"""
Homogenize the cross sections over the energy range
"""
def homo_energy(array1, array2=None):
"""
convinence function to do the integration
return rac{\\sum_i array1[i] * array2[i]}{\\sum_i array2[i]} for each coarse group
"""
if array2 is not None:
y = np.zeros((nCG, len(array1[0])))
z = np.zeros((nCG, len(array1[0])))
for g, cg in enumerate(grouping):
z[cg - 1] += array1[g] * array2[g]
y[cg - 1] += array2[g]
return z / y
else:
z = np.zeros((nCG, len(array1[0])))
for g, cg in enumerate(grouping):
z[cg - 1] += array1[g]
return z
nCG = self.mapping.nCG
nFG = self.mapping.nFG
grouping = np.array(self.mapping.grouping)
dE_coarse = np.array(self.mapping.dE_coarse)
dE_fine = np.array(self.mapping.dE_fine)
dE_coarse /= dE_coarse
dE_fine /= dE_fine
phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])
if self.computenorm:
norm = np.zeros(nCG)
for g, cg in enumerate(grouping):
norm[cg - 1] += self.norm[g]
self.norm = norm
"""
print(self.mapping.fine_bounds)
import matplotlib.pyplot as plt
def barchart(x, y):
X = np.zeros(2 * len(y))
Y = np.zeros(2 * len(y))
for i in range(0, len(y)):
X[2 * i] = x[i]
X[2 * i + 1] = x[i + 1]
Y[2 * i] = y[i]
Y[2 * i + 1] = y[i]
return X, Y
plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')
"""
self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)
self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)
self.chi_homo = homo_energy(self.chi_homo)
sig_s_homo = np.zeros((nCG, nCG, self.npin))
for gp, g in enumerate(grouping):
sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo
)
self.sig_s_homo = sig_s_homo
self.phi_homo = phi_homo
"""
plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')
plt.legend(loc=0)
plt.xlabel('Energy [MeV]')
plt.ylabel('$\\Sigma_t$ [cm$^{-1}$]')
plt.savefig('test.pdf', transparent=True)
"""
<|reserved_special_token_1|>
import pydgm
import numpy as np
import sys
class XS():
# Hold the cross section values with routines for outputting to txt file
def __init__(self, sig_t, sig_f, chi, sig_s, mu=None):
self.sig_t = sig_t
self.sig_f = sig_f
self.chi = chi
self.sig_s = sig_s
self.mu = mu if mu is None else np.ones(self.sig_t.shape)
def write_homogenized_XS(self, fname, mu=None):
if mu is not None:
assert mu.shape == self.sig_t.shape
self.mu = mu
G, npin = self.sig_t.shape
sig_t = self.sig_t * self.mu
vsig_f = self.sig_f * self.mu
sig_s = self.sig_s * self.mu
# Write the cross sections to file
s = '{} {} 0\n'.format(npin, G)
s += '{}\n'.format(' '.join([str(g) for g in range(G + 1)]))
s += '{}\n'.format(' '.join([str(g) for g in range(G)]))
for mat in range(npin):
s += 'pin {}\n'.format(mat + 1)
s += '1 1 1.0 0.0 0.602214179\n'
for g in range(G):
s += '{:<12.9f} {:<12.9f} {:<12.9f} {:<12.9f}\n'.format(sig_t[g, mat], vsig_f[g, mat], vsig_f[g, mat], self.chi[g, mat])
for g in range(G):
s += '{}\n'.format(' '.join(['{:<12.9f}'.format(s) for s in sig_s[:, g, mat]]))
with open(fname, 'w') as f:
f.write(s[:-1])
def __add__(self, newXS):
sig_t = np.concatenate([self.sig_t, newXS.sig_t], axis=-1)
sig_f = np.concatenate([self.sig_f, newXS.sig_f], axis=-1)
sig_s = np.concatenate([self.sig_s, newXS.sig_s], axis=-1)
chi = np.concatenate([self.chi, newXS.chi], axis=-1)
mu = np.concatenate([self.mu, newXS.mu], axis=-1)
return XS(sig_t, sig_f, chi, sig_s, mu)
class DGMSOLVER():
# Solve the problem using unotran
def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None, vacuum=False, k=None, phi=None, psi=None):
'''
Inputs:
G - Number of energy groups
fname - Name of the cross section file
fm - Fine mesh
cm - Coarse mesh
mm - Material map
nPin - Number of pincells
norm - norm of the flux to keep constant (match phi shape)
mapping - structure class that holds fine -> coarse mapping
'''
self.G = G
self.fname = fname
self.fm = fm
self.cm = cm
self.mm = mm
self.npin = nPin
self.norm = norm
self.computenorm = self.norm is None
self.vacuum = vacuum
self.mapping = mapping
# Pass on the options to unotran
self.setOptions()
# Solve using unotran
self.solve(k, phi, psi)
# Homogenize the cross sections over each spatial region
self.homogenize_space()
# Homogenize the cross sections over each energy range
if self.mapping is not None:
self.homogenize_energy()
def setOptions(self):
'''
Set the options for the Unotran solve
'''
pydgm.control.spatial_dimension = 1
pydgm.control.fine_mesh_x = self.fm
pydgm.control.coarse_mesh_x = self.cm
pydgm.control.material_map = self.mm
pydgm.control.xs_name = self.fname.ljust(256)
pydgm.control.angle_order = 8
pydgm.control.angle_option = pydgm.angle.gl
pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0
pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0
pydgm.control.allow_fission = True
pydgm.control.eigen_print = 0
pydgm.control.outer_print = 0
pydgm.control.eigen_tolerance = 1e-14
pydgm.control.outer_tolerance = 1e-12
pydgm.control.max_eigen_iters = 10000
pydgm.control.max_outer_iters = 1
pydgm.control.store_psi = True
pydgm.control.solver_type = 'eigen'.ljust(256)
pydgm.control.source_value = 0.0
pydgm.control.equation_type = 'DD'
pydgm.control.scatter_leg_order = 0
pydgm.control.ignore_warnings = True
def solve(self, k, phi, psi):
'''
Solve the problem using Unotran
'''
# Initialize the problem
pydgm.solver.initialize_solver()
if k is not None:
pydgm.state.keff = k
if phi is not None:
pydgm.state.phi = phi
if psi is not None:
pydgm.state.psi = psi
# Call the solver
pydgm.solver.solve()
# Copy any information from Unotran
self.extractInfo()
self.iter_k = np.copy(pydgm.state.keff)
self.iter_phi = np.copy(pydgm.state.phi)
self.iter_psi = np.copy(pydgm.state.psi)
# Clean up the solver
pydgm.solver.finalize_solver()
pydgm.control.finalize_control()
def extractInfo(self):
'''
Copy information from Unotran before the solver is deallocated
'''
self.phi = np.copy(pydgm.state.mg_phi[0])
self.dx = np.copy(pydgm.mesh.dx)
self.mat_map = np.copy(pydgm.state.mg_mmap)
self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T
self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T
self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T
self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T
def homogenize_space(self):
'''
Homogenize the cross sections over the spatial region
'''
def homo_space(array):
'''Convenience function to do the integration'''
# sum over region
return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2) / V
# Check that everything is the right shape of arrays
shape = self.phi.shape
#assert shape[0] == self.G
#assert (shape[1] / self.npin) == (shape[1] // self.npin)
# Compute the number of pins
nCellPerPin = shape[1] // self.npin
# Compute the \sum_{g\in G} \sum_{c\in r} V_c dE_g
V = np.sum(self.dx.reshape(self.npin, -1), axis=1)
# \forall g\in G, \forall c\in r compute \phi_{g,c} V_c dE_g
# Homogenize the flux
phi_dx = self.phi[:, :] * self.dx[:]
self.phi_homo = homo_space(phi_dx)
# Either find the norm of the flux or normalize the flux to self.norm
if self.computenorm:
self.norm = np.sum(self.phi_homo, axis=-1)
else:
print('compute norm')
norm = self.norm / np.sum(self.phi_homo, axis=-1)
self.phi_homo *= norm[:, np.newaxis]
phi_dx *= norm[:, np.newaxis]
# Homogenize the cross sections
self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo
self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo
self.chi_homo = homo_space(self.chi * self.dx)
self.sig_s_homo = np.zeros((self.G, self.G, self.npin))
for gp in range(self.G):
self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx) / self.phi_homo
def homogenize_energy(self):
'''
Homogenize the cross sections over the energy range
'''
def homo_energy(array1, array2=None):
'''
convinence function to do the integration
return \frac{\sum_i array1[i] * array2[i]}{\sum_i array2[i]} for each coarse group
'''
if array2 is not None:
y = np.zeros((nCG, len(array1[0])))
z = np.zeros((nCG, len(array1[0])))
for g, cg in enumerate(grouping):
z[cg - 1] += array1[g] * array2[g]
y[cg - 1] += array2[g]
return z / y
else:
z = np.zeros((nCG, len(array1[0])))
for g, cg in enumerate(grouping):
z[cg - 1] += array1[g]
return z
nCG = self.mapping.nCG
nFG = self.mapping.nFG
grouping = np.array(self.mapping.grouping)
dE_coarse = np.array(self.mapping.dE_coarse)
dE_fine = np.array(self.mapping.dE_fine)
dE_coarse /= dE_coarse
dE_fine /= dE_fine
phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])
if self.computenorm:
norm = np.zeros(nCG)
for g, cg in enumerate(grouping):
norm[cg - 1] += self.norm[g]
self.norm = norm
'''
print(self.mapping.fine_bounds)
import matplotlib.pyplot as plt
def barchart(x, y):
X = np.zeros(2 * len(y))
Y = np.zeros(2 * len(y))
for i in range(0, len(y)):
X[2 * i] = x[i]
X[2 * i + 1] = x[i + 1]
Y[2 * i] = y[i]
Y[2 * i + 1] = y[i]
return X, Y
plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')
'''
self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)
self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)
self.chi_homo = homo_energy(self.chi_homo)
sig_s_homo = np.zeros((nCG, nCG, self.npin))
for gp, g in enumerate(grouping):
sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo)
self.sig_s_homo = sig_s_homo
self.phi_homo = phi_homo
'''
plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')
plt.legend(loc=0)
plt.xlabel('Energy [MeV]')
plt.ylabel('$\Sigma_t$ [cm$^{-1}$]')
plt.savefig('test.pdf', transparent=True)
'''
|
flexible
|
{
"blob_id": "1358adc3b2b3ffe72c0ed87fb0024f1079ca7d04",
"index": 1710,
"step-1": "<mask token>\n\n\nclass DGMSOLVER:\n\n def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None,\n vacuum=False, k=None, phi=None, psi=None):\n \"\"\"\n Inputs:\n G - Number of energy groups\n fname - Name of the cross section file\n fm - Fine mesh\n cm - Coarse mesh\n mm - Material map\n nPin - Number of pincells\n norm - norm of the flux to keep constant (match phi shape)\n mapping - structure class that holds fine -> coarse mapping\n \"\"\"\n self.G = G\n self.fname = fname\n self.fm = fm\n self.cm = cm\n self.mm = mm\n self.npin = nPin\n self.norm = norm\n self.computenorm = self.norm is None\n self.vacuum = vacuum\n self.mapping = mapping\n self.setOptions()\n self.solve(k, phi, psi)\n self.homogenize_space()\n if self.mapping is not None:\n self.homogenize_energy()\n <mask token>\n\n def solve(self, k, phi, psi):\n \"\"\"\n Solve the problem using Unotran\n \"\"\"\n pydgm.solver.initialize_solver()\n if k is not None:\n pydgm.state.keff = k\n if phi is not None:\n pydgm.state.phi = phi\n if psi is not None:\n pydgm.state.psi = psi\n pydgm.solver.solve()\n self.extractInfo()\n self.iter_k = np.copy(pydgm.state.keff)\n self.iter_phi = np.copy(pydgm.state.phi)\n self.iter_psi = np.copy(pydgm.state.psi)\n pydgm.solver.finalize_solver()\n pydgm.control.finalize_control()\n\n def extractInfo(self):\n \"\"\"\n Copy information from Unotran before the solver is deallocated\n \"\"\"\n self.phi = np.copy(pydgm.state.mg_phi[0])\n self.dx = np.copy(pydgm.mesh.dx)\n self.mat_map = np.copy(pydgm.state.mg_mmap)\n self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c\n ] - 1] for c in range(len(self.mat_map))]).T\n self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] -\n 1] for c in range(len(self.mat_map))]).T\n self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for\n c in 
range(len(self.mat_map))]).T\n\n def homogenize_space(self):\n \"\"\"\n Homogenize the cross sections over the spatial region\n \"\"\"\n\n def homo_space(array):\n \"\"\"Convenience function to do the integration\"\"\"\n return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2\n ) / V\n shape = self.phi.shape\n nCellPerPin = shape[1] // self.npin\n V = np.sum(self.dx.reshape(self.npin, -1), axis=1)\n phi_dx = self.phi[:, :] * self.dx[:]\n self.phi_homo = homo_space(phi_dx)\n if self.computenorm:\n self.norm = np.sum(self.phi_homo, axis=-1)\n else:\n print('compute norm')\n norm = self.norm / np.sum(self.phi_homo, axis=-1)\n self.phi_homo *= norm[:, np.newaxis]\n phi_dx *= norm[:, np.newaxis]\n self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo\n self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo\n self.chi_homo = homo_space(self.chi * self.dx)\n self.sig_s_homo = np.zeros((self.G, self.G, self.npin))\n for gp in range(self.G):\n self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx\n ) / self.phi_homo\n\n def homogenize_energy(self):\n \"\"\"\n Homogenize the cross sections over the energy range\n \"\"\"\n\n def homo_energy(array1, array2=None):\n \"\"\"\n convinence function to do the integration\n\n return \frac{\\\\sum_i array1[i] * array2[i]}{\\\\sum_i array2[i]} for each coarse group\n \"\"\"\n if array2 is not None:\n y = np.zeros((nCG, len(array1[0])))\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g] * array2[g]\n y[cg - 1] += array2[g]\n return z / y\n else:\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g]\n return z\n nCG = self.mapping.nCG\n nFG = self.mapping.nFG\n grouping = np.array(self.mapping.grouping)\n dE_coarse = np.array(self.mapping.dE_coarse)\n dE_fine = np.array(self.mapping.dE_fine)\n dE_coarse /= dE_coarse\n dE_fine /= dE_fine\n phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])\n if 
self.computenorm:\n norm = np.zeros(nCG)\n for g, cg in enumerate(grouping):\n norm[cg - 1] += self.norm[g]\n self.norm = norm\n \"\"\"\n print(self.mapping.fine_bounds)\n import matplotlib.pyplot as plt\n\n def barchart(x, y):\n X = np.zeros(2 * len(y))\n Y = np.zeros(2 * len(y))\n for i in range(0, len(y)):\n X[2 * i] = x[i]\n X[2 * i + 1] = x[i + 1]\n Y[2 * i] = y[i]\n Y[2 * i + 1] = y[i]\n return X, Y\n\n plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')\n \"\"\"\n self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)\n self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)\n self.chi_homo = homo_energy(self.chi_homo)\n sig_s_homo = np.zeros((nCG, nCG, self.npin))\n for gp, g in enumerate(grouping):\n sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo\n )\n self.sig_s_homo = sig_s_homo\n self.phi_homo = phi_homo\n \"\"\"\n plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')\n plt.legend(loc=0)\n plt.xlabel('Energy [MeV]')\n plt.ylabel('$\\\\Sigma_t$ [cm$^{-1}$]')\n plt.savefig('test.pdf', transparent=True)\n \"\"\"\n",
"step-2": "<mask token>\n\n\nclass XS:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DGMSOLVER:\n\n def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None,\n vacuum=False, k=None, phi=None, psi=None):\n \"\"\"\n Inputs:\n G - Number of energy groups\n fname - Name of the cross section file\n fm - Fine mesh\n cm - Coarse mesh\n mm - Material map\n nPin - Number of pincells\n norm - norm of the flux to keep constant (match phi shape)\n mapping - structure class that holds fine -> coarse mapping\n \"\"\"\n self.G = G\n self.fname = fname\n self.fm = fm\n self.cm = cm\n self.mm = mm\n self.npin = nPin\n self.norm = norm\n self.computenorm = self.norm is None\n self.vacuum = vacuum\n self.mapping = mapping\n self.setOptions()\n self.solve(k, phi, psi)\n self.homogenize_space()\n if self.mapping is not None:\n self.homogenize_energy()\n\n def setOptions(self):\n \"\"\"\n Set the options for the Unotran solve\n \"\"\"\n pydgm.control.spatial_dimension = 1\n pydgm.control.fine_mesh_x = self.fm\n pydgm.control.coarse_mesh_x = self.cm\n pydgm.control.material_map = self.mm\n pydgm.control.xs_name = self.fname.ljust(256)\n pydgm.control.angle_order = 8\n pydgm.control.angle_option = pydgm.angle.gl\n pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0\n pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0\n pydgm.control.allow_fission = True\n pydgm.control.eigen_print = 0\n pydgm.control.outer_print = 0\n pydgm.control.eigen_tolerance = 1e-14\n pydgm.control.outer_tolerance = 1e-12\n pydgm.control.max_eigen_iters = 10000\n pydgm.control.max_outer_iters = 1\n pydgm.control.store_psi = True\n pydgm.control.solver_type = 'eigen'.ljust(256)\n pydgm.control.source_value = 0.0\n pydgm.control.equation_type = 'DD'\n pydgm.control.scatter_leg_order = 0\n pydgm.control.ignore_warnings = True\n\n def solve(self, k, phi, psi):\n \"\"\"\n Solve the problem using Unotran\n \"\"\"\n pydgm.solver.initialize_solver()\n if k is not None:\n pydgm.state.keff 
= k\n if phi is not None:\n pydgm.state.phi = phi\n if psi is not None:\n pydgm.state.psi = psi\n pydgm.solver.solve()\n self.extractInfo()\n self.iter_k = np.copy(pydgm.state.keff)\n self.iter_phi = np.copy(pydgm.state.phi)\n self.iter_psi = np.copy(pydgm.state.psi)\n pydgm.solver.finalize_solver()\n pydgm.control.finalize_control()\n\n def extractInfo(self):\n \"\"\"\n Copy information from Unotran before the solver is deallocated\n \"\"\"\n self.phi = np.copy(pydgm.state.mg_phi[0])\n self.dx = np.copy(pydgm.mesh.dx)\n self.mat_map = np.copy(pydgm.state.mg_mmap)\n self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c\n ] - 1] for c in range(len(self.mat_map))]).T\n self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] -\n 1] for c in range(len(self.mat_map))]).T\n self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n\n def homogenize_space(self):\n \"\"\"\n Homogenize the cross sections over the spatial region\n \"\"\"\n\n def homo_space(array):\n \"\"\"Convenience function to do the integration\"\"\"\n return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2\n ) / V\n shape = self.phi.shape\n nCellPerPin = shape[1] // self.npin\n V = np.sum(self.dx.reshape(self.npin, -1), axis=1)\n phi_dx = self.phi[:, :] * self.dx[:]\n self.phi_homo = homo_space(phi_dx)\n if self.computenorm:\n self.norm = np.sum(self.phi_homo, axis=-1)\n else:\n print('compute norm')\n norm = self.norm / np.sum(self.phi_homo, axis=-1)\n self.phi_homo *= norm[:, np.newaxis]\n phi_dx *= norm[:, np.newaxis]\n self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo\n self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo\n self.chi_homo = homo_space(self.chi * self.dx)\n self.sig_s_homo = np.zeros((self.G, self.G, self.npin))\n for gp in range(self.G):\n self.sig_s_homo[gp] = 
homo_space(self.sig_s[gp] * phi_dx\n ) / self.phi_homo\n\n def homogenize_energy(self):\n \"\"\"\n Homogenize the cross sections over the energy range\n \"\"\"\n\n def homo_energy(array1, array2=None):\n \"\"\"\n convinence function to do the integration\n\n return \frac{\\\\sum_i array1[i] * array2[i]}{\\\\sum_i array2[i]} for each coarse group\n \"\"\"\n if array2 is not None:\n y = np.zeros((nCG, len(array1[0])))\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g] * array2[g]\n y[cg - 1] += array2[g]\n return z / y\n else:\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g]\n return z\n nCG = self.mapping.nCG\n nFG = self.mapping.nFG\n grouping = np.array(self.mapping.grouping)\n dE_coarse = np.array(self.mapping.dE_coarse)\n dE_fine = np.array(self.mapping.dE_fine)\n dE_coarse /= dE_coarse\n dE_fine /= dE_fine\n phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])\n if self.computenorm:\n norm = np.zeros(nCG)\n for g, cg in enumerate(grouping):\n norm[cg - 1] += self.norm[g]\n self.norm = norm\n \"\"\"\n print(self.mapping.fine_bounds)\n import matplotlib.pyplot as plt\n\n def barchart(x, y):\n X = np.zeros(2 * len(y))\n Y = np.zeros(2 * len(y))\n for i in range(0, len(y)):\n X[2 * i] = x[i]\n X[2 * i + 1] = x[i + 1]\n Y[2 * i] = y[i]\n Y[2 * i + 1] = y[i]\n return X, Y\n\n plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')\n \"\"\"\n self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)\n self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)\n self.chi_homo = homo_energy(self.chi_homo)\n sig_s_homo = np.zeros((nCG, nCG, self.npin))\n for gp, g in enumerate(grouping):\n sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo\n )\n self.sig_s_homo = sig_s_homo\n self.phi_homo = phi_homo\n \"\"\"\n plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse 
group')\n plt.legend(loc=0)\n plt.xlabel('Energy [MeV]')\n plt.ylabel('$\\\\Sigma_t$ [cm$^{-1}$]')\n plt.savefig('test.pdf', transparent=True)\n \"\"\"\n",
"step-3": "<mask token>\n\n\nclass XS:\n <mask token>\n\n def write_homogenized_XS(self, fname, mu=None):\n if mu is not None:\n assert mu.shape == self.sig_t.shape\n self.mu = mu\n G, npin = self.sig_t.shape\n sig_t = self.sig_t * self.mu\n vsig_f = self.sig_f * self.mu\n sig_s = self.sig_s * self.mu\n s = '{} {} 0\\n'.format(npin, G)\n s += '{}\\n'.format(' '.join([str(g) for g in range(G + 1)]))\n s += '{}\\n'.format(' '.join([str(g) for g in range(G)]))\n for mat in range(npin):\n s += 'pin {}\\n'.format(mat + 1)\n s += '1 1 1.0 0.0 0.602214179\\n'\n for g in range(G):\n s += '{:<12.9f} {:<12.9f} {:<12.9f} {:<12.9f}\\n'.format(sig_t\n [g, mat], vsig_f[g, mat], vsig_f[g, mat], self.chi[g, mat])\n for g in range(G):\n s += '{}\\n'.format(' '.join(['{:<12.9f}'.format(s) for s in\n sig_s[:, g, mat]]))\n with open(fname, 'w') as f:\n f.write(s[:-1])\n <mask token>\n\n\nclass DGMSOLVER:\n\n def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None,\n vacuum=False, k=None, phi=None, psi=None):\n \"\"\"\n Inputs:\n G - Number of energy groups\n fname - Name of the cross section file\n fm - Fine mesh\n cm - Coarse mesh\n mm - Material map\n nPin - Number of pincells\n norm - norm of the flux to keep constant (match phi shape)\n mapping - structure class that holds fine -> coarse mapping\n \"\"\"\n self.G = G\n self.fname = fname\n self.fm = fm\n self.cm = cm\n self.mm = mm\n self.npin = nPin\n self.norm = norm\n self.computenorm = self.norm is None\n self.vacuum = vacuum\n self.mapping = mapping\n self.setOptions()\n self.solve(k, phi, psi)\n self.homogenize_space()\n if self.mapping is not None:\n self.homogenize_energy()\n\n def setOptions(self):\n \"\"\"\n Set the options for the Unotran solve\n \"\"\"\n pydgm.control.spatial_dimension = 1\n pydgm.control.fine_mesh_x = self.fm\n pydgm.control.coarse_mesh_x = self.cm\n pydgm.control.material_map = self.mm\n pydgm.control.xs_name = self.fname.ljust(256)\n pydgm.control.angle_order = 8\n 
pydgm.control.angle_option = pydgm.angle.gl\n pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0\n pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0\n pydgm.control.allow_fission = True\n pydgm.control.eigen_print = 0\n pydgm.control.outer_print = 0\n pydgm.control.eigen_tolerance = 1e-14\n pydgm.control.outer_tolerance = 1e-12\n pydgm.control.max_eigen_iters = 10000\n pydgm.control.max_outer_iters = 1\n pydgm.control.store_psi = True\n pydgm.control.solver_type = 'eigen'.ljust(256)\n pydgm.control.source_value = 0.0\n pydgm.control.equation_type = 'DD'\n pydgm.control.scatter_leg_order = 0\n pydgm.control.ignore_warnings = True\n\n def solve(self, k, phi, psi):\n \"\"\"\n Solve the problem using Unotran\n \"\"\"\n pydgm.solver.initialize_solver()\n if k is not None:\n pydgm.state.keff = k\n if phi is not None:\n pydgm.state.phi = phi\n if psi is not None:\n pydgm.state.psi = psi\n pydgm.solver.solve()\n self.extractInfo()\n self.iter_k = np.copy(pydgm.state.keff)\n self.iter_phi = np.copy(pydgm.state.phi)\n self.iter_psi = np.copy(pydgm.state.psi)\n pydgm.solver.finalize_solver()\n pydgm.control.finalize_control()\n\n def extractInfo(self):\n \"\"\"\n Copy information from Unotran before the solver is deallocated\n \"\"\"\n self.phi = np.copy(pydgm.state.mg_phi[0])\n self.dx = np.copy(pydgm.mesh.dx)\n self.mat_map = np.copy(pydgm.state.mg_mmap)\n self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c\n ] - 1] for c in range(len(self.mat_map))]).T\n self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] -\n 1] for c in range(len(self.mat_map))]).T\n self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n\n def homogenize_space(self):\n \"\"\"\n Homogenize the cross sections over the spatial region\n \"\"\"\n\n def homo_space(array):\n \"\"\"Convenience function to do 
the integration\"\"\"\n return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2\n ) / V\n shape = self.phi.shape\n nCellPerPin = shape[1] // self.npin\n V = np.sum(self.dx.reshape(self.npin, -1), axis=1)\n phi_dx = self.phi[:, :] * self.dx[:]\n self.phi_homo = homo_space(phi_dx)\n if self.computenorm:\n self.norm = np.sum(self.phi_homo, axis=-1)\n else:\n print('compute norm')\n norm = self.norm / np.sum(self.phi_homo, axis=-1)\n self.phi_homo *= norm[:, np.newaxis]\n phi_dx *= norm[:, np.newaxis]\n self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo\n self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo\n self.chi_homo = homo_space(self.chi * self.dx)\n self.sig_s_homo = np.zeros((self.G, self.G, self.npin))\n for gp in range(self.G):\n self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx\n ) / self.phi_homo\n\n def homogenize_energy(self):\n \"\"\"\n Homogenize the cross sections over the energy range\n \"\"\"\n\n def homo_energy(array1, array2=None):\n \"\"\"\n convinence function to do the integration\n\n return \frac{\\\\sum_i array1[i] * array2[i]}{\\\\sum_i array2[i]} for each coarse group\n \"\"\"\n if array2 is not None:\n y = np.zeros((nCG, len(array1[0])))\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g] * array2[g]\n y[cg - 1] += array2[g]\n return z / y\n else:\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g]\n return z\n nCG = self.mapping.nCG\n nFG = self.mapping.nFG\n grouping = np.array(self.mapping.grouping)\n dE_coarse = np.array(self.mapping.dE_coarse)\n dE_fine = np.array(self.mapping.dE_fine)\n dE_coarse /= dE_coarse\n dE_fine /= dE_fine\n phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])\n if self.computenorm:\n norm = np.zeros(nCG)\n for g, cg in enumerate(grouping):\n norm[cg - 1] += self.norm[g]\n self.norm = norm\n \"\"\"\n print(self.mapping.fine_bounds)\n import matplotlib.pyplot as 
plt\n\n def barchart(x, y):\n X = np.zeros(2 * len(y))\n Y = np.zeros(2 * len(y))\n for i in range(0, len(y)):\n X[2 * i] = x[i]\n X[2 * i + 1] = x[i + 1]\n Y[2 * i] = y[i]\n Y[2 * i + 1] = y[i]\n return X, Y\n\n plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')\n \"\"\"\n self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)\n self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)\n self.chi_homo = homo_energy(self.chi_homo)\n sig_s_homo = np.zeros((nCG, nCG, self.npin))\n for gp, g in enumerate(grouping):\n sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo\n )\n self.sig_s_homo = sig_s_homo\n self.phi_homo = phi_homo\n \"\"\"\n plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')\n plt.legend(loc=0)\n plt.xlabel('Energy [MeV]')\n plt.ylabel('$\\\\Sigma_t$ [cm$^{-1}$]')\n plt.savefig('test.pdf', transparent=True)\n \"\"\"\n",
"step-4": "<mask token>\n\n\nclass XS:\n\n def __init__(self, sig_t, sig_f, chi, sig_s, mu=None):\n self.sig_t = sig_t\n self.sig_f = sig_f\n self.chi = chi\n self.sig_s = sig_s\n self.mu = mu if mu is None else np.ones(self.sig_t.shape)\n\n def write_homogenized_XS(self, fname, mu=None):\n if mu is not None:\n assert mu.shape == self.sig_t.shape\n self.mu = mu\n G, npin = self.sig_t.shape\n sig_t = self.sig_t * self.mu\n vsig_f = self.sig_f * self.mu\n sig_s = self.sig_s * self.mu\n s = '{} {} 0\\n'.format(npin, G)\n s += '{}\\n'.format(' '.join([str(g) for g in range(G + 1)]))\n s += '{}\\n'.format(' '.join([str(g) for g in range(G)]))\n for mat in range(npin):\n s += 'pin {}\\n'.format(mat + 1)\n s += '1 1 1.0 0.0 0.602214179\\n'\n for g in range(G):\n s += '{:<12.9f} {:<12.9f} {:<12.9f} {:<12.9f}\\n'.format(sig_t\n [g, mat], vsig_f[g, mat], vsig_f[g, mat], self.chi[g, mat])\n for g in range(G):\n s += '{}\\n'.format(' '.join(['{:<12.9f}'.format(s) for s in\n sig_s[:, g, mat]]))\n with open(fname, 'w') as f:\n f.write(s[:-1])\n\n def __add__(self, newXS):\n sig_t = np.concatenate([self.sig_t, newXS.sig_t], axis=-1)\n sig_f = np.concatenate([self.sig_f, newXS.sig_f], axis=-1)\n sig_s = np.concatenate([self.sig_s, newXS.sig_s], axis=-1)\n chi = np.concatenate([self.chi, newXS.chi], axis=-1)\n mu = np.concatenate([self.mu, newXS.mu], axis=-1)\n return XS(sig_t, sig_f, chi, sig_s, mu)\n\n\nclass DGMSOLVER:\n\n def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None,\n vacuum=False, k=None, phi=None, psi=None):\n \"\"\"\n Inputs:\n G - Number of energy groups\n fname - Name of the cross section file\n fm - Fine mesh\n cm - Coarse mesh\n mm - Material map\n nPin - Number of pincells\n norm - norm of the flux to keep constant (match phi shape)\n mapping - structure class that holds fine -> coarse mapping\n \"\"\"\n self.G = G\n self.fname = fname\n self.fm = fm\n self.cm = cm\n self.mm = mm\n self.npin = nPin\n self.norm = norm\n self.computenorm = 
self.norm is None\n self.vacuum = vacuum\n self.mapping = mapping\n self.setOptions()\n self.solve(k, phi, psi)\n self.homogenize_space()\n if self.mapping is not None:\n self.homogenize_energy()\n\n def setOptions(self):\n \"\"\"\n Set the options for the Unotran solve\n \"\"\"\n pydgm.control.spatial_dimension = 1\n pydgm.control.fine_mesh_x = self.fm\n pydgm.control.coarse_mesh_x = self.cm\n pydgm.control.material_map = self.mm\n pydgm.control.xs_name = self.fname.ljust(256)\n pydgm.control.angle_order = 8\n pydgm.control.angle_option = pydgm.angle.gl\n pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0\n pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0\n pydgm.control.allow_fission = True\n pydgm.control.eigen_print = 0\n pydgm.control.outer_print = 0\n pydgm.control.eigen_tolerance = 1e-14\n pydgm.control.outer_tolerance = 1e-12\n pydgm.control.max_eigen_iters = 10000\n pydgm.control.max_outer_iters = 1\n pydgm.control.store_psi = True\n pydgm.control.solver_type = 'eigen'.ljust(256)\n pydgm.control.source_value = 0.0\n pydgm.control.equation_type = 'DD'\n pydgm.control.scatter_leg_order = 0\n pydgm.control.ignore_warnings = True\n\n def solve(self, k, phi, psi):\n \"\"\"\n Solve the problem using Unotran\n \"\"\"\n pydgm.solver.initialize_solver()\n if k is not None:\n pydgm.state.keff = k\n if phi is not None:\n pydgm.state.phi = phi\n if psi is not None:\n pydgm.state.psi = psi\n pydgm.solver.solve()\n self.extractInfo()\n self.iter_k = np.copy(pydgm.state.keff)\n self.iter_phi = np.copy(pydgm.state.phi)\n self.iter_psi = np.copy(pydgm.state.psi)\n pydgm.solver.finalize_solver()\n pydgm.control.finalize_control()\n\n def extractInfo(self):\n \"\"\"\n Copy information from Unotran before the solver is deallocated\n \"\"\"\n self.phi = np.copy(pydgm.state.mg_phi[0])\n self.dx = np.copy(pydgm.mesh.dx)\n self.mat_map = np.copy(pydgm.state.mg_mmap)\n self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for\n c in 
range(len(self.mat_map))]).T\n self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c\n ] - 1] for c in range(len(self.mat_map))]).T\n self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] -\n 1] for c in range(len(self.mat_map))]).T\n self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for\n c in range(len(self.mat_map))]).T\n\n def homogenize_space(self):\n \"\"\"\n Homogenize the cross sections over the spatial region\n \"\"\"\n\n def homo_space(array):\n \"\"\"Convenience function to do the integration\"\"\"\n return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2\n ) / V\n shape = self.phi.shape\n nCellPerPin = shape[1] // self.npin\n V = np.sum(self.dx.reshape(self.npin, -1), axis=1)\n phi_dx = self.phi[:, :] * self.dx[:]\n self.phi_homo = homo_space(phi_dx)\n if self.computenorm:\n self.norm = np.sum(self.phi_homo, axis=-1)\n else:\n print('compute norm')\n norm = self.norm / np.sum(self.phi_homo, axis=-1)\n self.phi_homo *= norm[:, np.newaxis]\n phi_dx *= norm[:, np.newaxis]\n self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo\n self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo\n self.chi_homo = homo_space(self.chi * self.dx)\n self.sig_s_homo = np.zeros((self.G, self.G, self.npin))\n for gp in range(self.G):\n self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx\n ) / self.phi_homo\n\n def homogenize_energy(self):\n \"\"\"\n Homogenize the cross sections over the energy range\n \"\"\"\n\n def homo_energy(array1, array2=None):\n \"\"\"\n convinence function to do the integration\n\n return \frac{\\\\sum_i array1[i] * array2[i]}{\\\\sum_i array2[i]} for each coarse group\n \"\"\"\n if array2 is not None:\n y = np.zeros((nCG, len(array1[0])))\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g] * array2[g]\n y[cg - 1] += array2[g]\n return z / y\n else:\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg 
- 1] += array1[g]\n return z\n nCG = self.mapping.nCG\n nFG = self.mapping.nFG\n grouping = np.array(self.mapping.grouping)\n dE_coarse = np.array(self.mapping.dE_coarse)\n dE_fine = np.array(self.mapping.dE_fine)\n dE_coarse /= dE_coarse\n dE_fine /= dE_fine\n phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])\n if self.computenorm:\n norm = np.zeros(nCG)\n for g, cg in enumerate(grouping):\n norm[cg - 1] += self.norm[g]\n self.norm = norm\n \"\"\"\n print(self.mapping.fine_bounds)\n import matplotlib.pyplot as plt\n\n def barchart(x, y):\n X = np.zeros(2 * len(y))\n Y = np.zeros(2 * len(y))\n for i in range(0, len(y)):\n X[2 * i] = x[i]\n X[2 * i + 1] = x[i + 1]\n Y[2 * i] = y[i]\n Y[2 * i + 1] = y[i]\n return X, Y\n\n plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')\n \"\"\"\n self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)\n self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)\n self.chi_homo = homo_energy(self.chi_homo)\n sig_s_homo = np.zeros((nCG, nCG, self.npin))\n for gp, g in enumerate(grouping):\n sig_s_homo[g - 1] += homo_energy(self.sig_s_homo[gp], self.phi_homo\n )\n self.sig_s_homo = sig_s_homo\n self.phi_homo = phi_homo\n \"\"\"\n plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')\n plt.legend(loc=0)\n plt.xlabel('Energy [MeV]')\n plt.ylabel('$\\\\Sigma_t$ [cm$^{-1}$]')\n plt.savefig('test.pdf', transparent=True)\n \"\"\"\n",
"step-5": "import pydgm\nimport numpy as np\nimport sys\n\n\nclass XS():\n\n # Hold the cross section values with routines for outputting to txt file\n def __init__(self, sig_t, sig_f, chi, sig_s, mu=None):\n self.sig_t = sig_t\n self.sig_f = sig_f\n self.chi = chi\n self.sig_s = sig_s\n self.mu = mu if mu is None else np.ones(self.sig_t.shape)\n\n def write_homogenized_XS(self, fname, mu=None):\n if mu is not None:\n assert mu.shape == self.sig_t.shape\n self.mu = mu\n\n G, npin = self.sig_t.shape\n\n sig_t = self.sig_t * self.mu\n vsig_f = self.sig_f * self.mu\n sig_s = self.sig_s * self.mu\n\n # Write the cross sections to file\n s = '{} {} 0\\n'.format(npin, G)\n s += '{}\\n'.format(' '.join([str(g) for g in range(G + 1)]))\n s += '{}\\n'.format(' '.join([str(g) for g in range(G)]))\n for mat in range(npin):\n s += 'pin {}\\n'.format(mat + 1)\n s += '1 1 1.0 0.0 0.602214179\\n'\n\n for g in range(G):\n s += '{:<12.9f} {:<12.9f} {:<12.9f} {:<12.9f}\\n'.format(sig_t[g, mat], vsig_f[g, mat], vsig_f[g, mat], self.chi[g, mat])\n for g in range(G):\n s += '{}\\n'.format(' '.join(['{:<12.9f}'.format(s) for s in sig_s[:, g, mat]]))\n\n with open(fname, 'w') as f:\n f.write(s[:-1])\n\n def __add__(self, newXS):\n sig_t = np.concatenate([self.sig_t, newXS.sig_t], axis=-1)\n sig_f = np.concatenate([self.sig_f, newXS.sig_f], axis=-1)\n sig_s = np.concatenate([self.sig_s, newXS.sig_s], axis=-1)\n chi = np.concatenate([self.chi, newXS.chi], axis=-1)\n mu = np.concatenate([self.mu, newXS.mu], axis=-1)\n\n return XS(sig_t, sig_f, chi, sig_s, mu)\n\n\nclass DGMSOLVER():\n\n # Solve the problem using unotran\n def __init__(self, G, fname, fm, cm, mm, nPin, norm=None, mapping=None, vacuum=False, k=None, phi=None, psi=None):\n '''\n Inputs:\n G - Number of energy groups\n fname - Name of the cross section file\n fm - Fine mesh\n cm - Coarse mesh\n mm - Material map\n nPin - Number of pincells\n norm - norm of the flux to keep constant (match phi shape)\n mapping - structure class 
that holds fine -> coarse mapping\n '''\n\n self.G = G\n self.fname = fname\n self.fm = fm\n self.cm = cm\n self.mm = mm\n self.npin = nPin\n self.norm = norm\n self.computenorm = self.norm is None\n self.vacuum = vacuum\n\n self.mapping = mapping\n # Pass on the options to unotran\n self.setOptions()\n # Solve using unotran\n self.solve(k, phi, psi)\n # Homogenize the cross sections over each spatial region\n self.homogenize_space()\n # Homogenize the cross sections over each energy range\n if self.mapping is not None:\n self.homogenize_energy()\n\n def setOptions(self):\n '''\n Set the options for the Unotran solve\n '''\n pydgm.control.spatial_dimension = 1\n pydgm.control.fine_mesh_x = self.fm\n pydgm.control.coarse_mesh_x = self.cm\n pydgm.control.material_map = self.mm\n pydgm.control.xs_name = self.fname.ljust(256)\n pydgm.control.angle_order = 8\n pydgm.control.angle_option = pydgm.angle.gl\n pydgm.control.boundary_west = 0.0 if self.vacuum else 1.0\n pydgm.control.boundary_east = 0.0 if self.vacuum else 1.0\n pydgm.control.allow_fission = True\n pydgm.control.eigen_print = 0\n pydgm.control.outer_print = 0\n pydgm.control.eigen_tolerance = 1e-14\n pydgm.control.outer_tolerance = 1e-12\n pydgm.control.max_eigen_iters = 10000\n pydgm.control.max_outer_iters = 1\n pydgm.control.store_psi = True\n pydgm.control.solver_type = 'eigen'.ljust(256)\n pydgm.control.source_value = 0.0\n pydgm.control.equation_type = 'DD'\n pydgm.control.scatter_leg_order = 0\n pydgm.control.ignore_warnings = True\n\n def solve(self, k, phi, psi):\n '''\n Solve the problem using Unotran\n '''\n\n # Initialize the problem\n pydgm.solver.initialize_solver()\n\n if k is not None:\n pydgm.state.keff = k\n if phi is not None:\n pydgm.state.phi = phi\n if psi is not None:\n pydgm.state.psi = psi\n\n # Call the solver\n pydgm.solver.solve()\n\n # Copy any information from Unotran\n self.extractInfo()\n\n self.iter_k = np.copy(pydgm.state.keff)\n self.iter_phi = np.copy(pydgm.state.phi)\n 
self.iter_psi = np.copy(pydgm.state.psi)\n\n # Clean up the solver\n pydgm.solver.finalize_solver()\n pydgm.control.finalize_control()\n\n def extractInfo(self):\n '''\n Copy information from Unotran before the solver is deallocated\n '''\n self.phi = np.copy(pydgm.state.mg_phi[0])\n self.dx = np.copy(pydgm.mesh.dx)\n self.mat_map = np.copy(pydgm.state.mg_mmap)\n self.sig_t = np.array([pydgm.state.mg_sig_t[:, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T\n self.sig_s = np.array([pydgm.state.mg_sig_s[0, :, :, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T\n self.vsig_f = np.array([pydgm.state.mg_nu_sig_f[:, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T\n self.chi = np.array([pydgm.state.mg_chi[:, self.mat_map[c] - 1] for c in range(len(self.mat_map))]).T\n\n def homogenize_space(self):\n '''\n Homogenize the cross sections over the spatial region\n '''\n\n def homo_space(array):\n '''Convenience function to do the integration'''\n # sum over region\n return np.sum(array.reshape(-1, self.npin, nCellPerPin), axis=2) / V\n\n # Check that everything is the right shape of arrays\n shape = self.phi.shape\n #assert shape[0] == self.G\n #assert (shape[1] / self.npin) == (shape[1] // self.npin)\n\n # Compute the number of pins\n nCellPerPin = shape[1] // self.npin\n\n # Compute the \\sum_{g\\in G} \\sum_{c\\in r} V_c dE_g\n V = np.sum(self.dx.reshape(self.npin, -1), axis=1)\n\n # \\forall g\\in G, \\forall c\\in r compute \\phi_{g,c} V_c dE_g\n # Homogenize the flux\n phi_dx = self.phi[:, :] * self.dx[:]\n self.phi_homo = homo_space(phi_dx)\n\n # Either find the norm of the flux or normalize the flux to self.norm\n if self.computenorm:\n self.norm = np.sum(self.phi_homo, axis=-1)\n else:\n print('compute norm')\n norm = self.norm / np.sum(self.phi_homo, axis=-1)\n self.phi_homo *= norm[:, np.newaxis]\n phi_dx *= norm[:, np.newaxis]\n\n # Homogenize the cross sections\n self.sig_t_homo = homo_space(self.sig_t * phi_dx) / self.phi_homo\n 
self.sig_f_homo = homo_space(self.vsig_f * phi_dx) / self.phi_homo\n self.chi_homo = homo_space(self.chi * self.dx)\n self.sig_s_homo = np.zeros((self.G, self.G, self.npin))\n for gp in range(self.G):\n self.sig_s_homo[gp] = homo_space(self.sig_s[gp] * phi_dx) / self.phi_homo\n\n def homogenize_energy(self):\n '''\n Homogenize the cross sections over the energy range\n '''\n\n def homo_energy(array1, array2=None):\n '''\n convinence function to do the integration\n\n return \\frac{\\sum_i array1[i] * array2[i]}{\\sum_i array2[i]} for each coarse group\n '''\n if array2 is not None:\n y = np.zeros((nCG, len(array1[0])))\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g] * array2[g]\n y[cg - 1] += array2[g]\n\n return z / y\n else:\n z = np.zeros((nCG, len(array1[0])))\n for g, cg in enumerate(grouping):\n z[cg - 1] += array1[g]\n return z\n\n nCG = self.mapping.nCG\n nFG = self.mapping.nFG\n grouping = np.array(self.mapping.grouping)\n\n dE_coarse = np.array(self.mapping.dE_coarse)\n dE_fine = np.array(self.mapping.dE_fine)\n dE_coarse /= dE_coarse\n dE_fine /= dE_fine\n\n phi_homo = homo_energy(self.phi_homo, dE_fine[:, np.newaxis])\n\n if self.computenorm:\n norm = np.zeros(nCG)\n for g, cg in enumerate(grouping):\n norm[cg - 1] += self.norm[g]\n self.norm = norm\n\n '''\n print(self.mapping.fine_bounds)\n import matplotlib.pyplot as plt\n\n def barchart(x, y):\n X = np.zeros(2 * len(y))\n Y = np.zeros(2 * len(y))\n for i in range(0, len(y)):\n X[2 * i] = x[i]\n X[2 * i + 1] = x[i + 1]\n Y[2 * i] = y[i]\n Y[2 * i + 1] = y[i]\n return X, Y\n\n plt.loglog(*barchart(self.mapping.fine_bounds, self.sig_t_homo[:,0]), 'g-', label='fine group')\n '''\n\n self.sig_t_homo = homo_energy(self.sig_t_homo, self.phi_homo)\n self.sig_f_homo = homo_energy(self.sig_f_homo, self.phi_homo)\n self.chi_homo = homo_energy(self.chi_homo)\n sig_s_homo = np.zeros((nCG, nCG, self.npin))\n for gp, g in enumerate(grouping):\n sig_s_homo[g - 1] 
+= homo_energy(self.sig_s_homo[gp], self.phi_homo)\n self.sig_s_homo = sig_s_homo\n self.phi_homo = phi_homo\n\n '''\n plt.loglog(*barchart(self.mapping.coarse_bounds, self.sig_t_homo[:,0]), 'k-', label='coarse group')\n plt.legend(loc=0)\n plt.xlabel('Energy [MeV]')\n plt.ylabel('$\\Sigma_t$ [cm$^{-1}$]')\n plt.savefig('test.pdf', transparent=True)\n '''\n",
"step-ids": [
6,
8,
9,
11,
13
]
}
|
[
6,
8,
9,
11,
13
] |
from wagtail.tests.utils import WagtailPageTests
from setup_guide.models import SetupGuideLandingPage, SetupGuidePage
from home.models import HomePage
class SetupGuideLandingPageTests(WagtailPageTests):
    """Page-tree placement rules for the setup guide landing page."""

    def test_can_create_under_homepage(self):
        """A SetupGuideLandingPage may be created beneath the HomePage."""
        self.assertCanCreateAt(HomePage, SetupGuideLandingPage)

    def test_setup_guide_page_subpages(self):
        """The landing page only allows SetupGuidePage children."""
        self.assertAllowedSubpageTypes(SetupGuideLandingPage, {SetupGuidePage})
class SetupGuidePageTests(WagtailPageTests):
    """Page-tree placement rules for individual setup guide pages."""

    def test_can_create_under_landing_page(self):
        """A SetupGuidePage may be created beneath the landing page."""
        self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)
|
normal
|
{
"blob_id": "5fdcbccb99880da79eb0efbdecd328ca1cf73d7f",
"index": 1415,
"step-1": "<mask token>\n\n\nclass SetupGuideLandingPageTests(WagtailPageTests):\n <mask token>\n <mask token>\n\n\nclass SetupGuidePageTests(WagtailPageTests):\n\n def test_can_create_under_landing_page(self):\n self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)\n",
"step-2": "<mask token>\n\n\nclass SetupGuideLandingPageTests(WagtailPageTests):\n\n def test_can_create_under_homepage(self):\n self.assertCanCreateAt(HomePage, SetupGuideLandingPage)\n <mask token>\n\n\nclass SetupGuidePageTests(WagtailPageTests):\n\n def test_can_create_under_landing_page(self):\n self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)\n",
"step-3": "<mask token>\n\n\nclass SetupGuideLandingPageTests(WagtailPageTests):\n\n def test_can_create_under_homepage(self):\n self.assertCanCreateAt(HomePage, SetupGuideLandingPage)\n\n def test_setup_guide_page_subpages(self):\n self.assertAllowedSubpageTypes(SetupGuideLandingPage, {SetupGuidePage})\n\n\nclass SetupGuidePageTests(WagtailPageTests):\n\n def test_can_create_under_landing_page(self):\n self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)\n",
"step-4": "from wagtail.tests.utils import WagtailPageTests\nfrom setup_guide.models import SetupGuideLandingPage, SetupGuidePage\nfrom home.models import HomePage\n\n\nclass SetupGuideLandingPageTests(WagtailPageTests):\n\n def test_can_create_under_homepage(self):\n self.assertCanCreateAt(HomePage, SetupGuideLandingPage)\n\n def test_setup_guide_page_subpages(self):\n self.assertAllowedSubpageTypes(SetupGuideLandingPage, {SetupGuidePage})\n\n\nclass SetupGuidePageTests(WagtailPageTests):\n\n def test_can_create_under_landing_page(self):\n self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)\n",
"step-5": "from wagtail.tests.utils import WagtailPageTests\nfrom setup_guide.models import SetupGuideLandingPage, SetupGuidePage\nfrom home.models import HomePage\n\n\nclass SetupGuideLandingPageTests(WagtailPageTests):\n def test_can_create_under_homepage(self):\n self.assertCanCreateAt(HomePage, SetupGuideLandingPage)\n\n def test_setup_guide_page_subpages(self):\n # A SetupGuidePage can only have other SetupGuidePage children\n self.assertAllowedSubpageTypes(\n SetupGuideLandingPage, {SetupGuidePage})\n\n\nclass SetupGuidePageTests(WagtailPageTests):\n def test_can_create_under_landing_page(self):\n self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.